Multitenant architecture

21 Topics
"}},"componentScriptGroups({\"componentId\":\"custom.widget.MicrosoftFooter\"})":{"__typename":"ComponentScriptGroups","scriptGroups":{"__typename":"ComponentScriptGroupsDefinition","afterInteractive":{"__typename":"PageScriptGroupDefinition","group":"AFTER_INTERACTIVE","scriptIds":[]},"lazyOnLoad":{"__typename":"PageScriptGroupDefinition","group":"LAZY_ON_LOAD","scriptIds":[]}},"componentScripts":[]},"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/community/NavbarDropdownToggle\"]})":[{"__ref":"CachedAsset:text:en_US-components/community/NavbarDropdownToggle-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageListTabs\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageListTabs-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageView/MessageViewInline\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageView/MessageViewInline-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/common/Pager/PagerLoadMore\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/common/Pager/PagerLoadMore-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/common/OverflowNav\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/common/OverflowNav-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/users/UserLink\"]})":[{"__ref":"CachedAsset:text:en_US-components/users/UserLink-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageSubject\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageSubject-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageTime\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageTime-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeIcon\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeIcon-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageUnreadCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageUnreadCount-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageViewCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageViewCount-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/kudos/KudosCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/kudos/KudosCount-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageRepliesCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageRepliesCount-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageBody\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageBody-1745505307000"}],"cachedText({\"lastModified\":\"1745505307000\",\"locale\":\"en-US\"
,\"namespaces\":[\"shared/client/components/users/UserAvatar\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/users/UserAvatar-1745505307000"}]},"Theme:customTheme1":{"__typename":"Theme","id":"customTheme1"},"User:user:-1":{"__typename":"User","id":"user:-1","uid":-1,"login":"Deleted","email":"","avatar":null,"rank":null,"kudosWeight":1,"registrationData":{"__typename":"RegistrationData","status":"ANONYMOUS","registrationTime":null,"confirmEmailStatus":false,"registrationAccessLevel":"VIEW","ssoRegistrationFields":[]},"ssoId":null,"profileSettings":{"__typename":"ProfileSettings","dateDisplayStyle":{"__typename":"InheritableStringSettingWithPossibleValues","key":"layout.friendly_dates_enabled","value":"false","localValue":"true","possibleValues":["true","false"]},"dateDisplayFormat":{"__typename":"InheritableStringSetting","key":"layout.format_pattern_date","value":"MMM dd yyyy","localValue":"MM-dd-yyyy"},"language":{"__typename":"InheritableStringSettingWithPossibleValues","key":"profile.language","value":"en-US","localValue":null,"possibleValues":["en-US","es-ES"]},"repliesSortOrder":{"__typename":"InheritableStringSettingWithPossibleValues","key":"config.user_replies_sort_order","value":"DEFAULT","localValue":"DEFAULT","possibleValues":["DEFAULT","LIKES","PUBLISH_TIME","REVERSE_PUBLISH_TIME"]}},"deleted":false},"CachedAsset:pages-1746561118944":{"__typename":"CachedAsset","id":"pages-1746561118944","value":[{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"BlogViewAllPostsPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId/all-posts/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"CasePortalPage","type":"CASE_PORTAL","urlPath":"/caseportal","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"CreateGroupHubPage","type":"GROUP_HUB","urlPath":"/groups/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"CaseViewPage","type":"CASE_DETAILS","urlPath":"/case/:caseId/:caseNumber","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"InboxPage","type":"COMMUNITY","urlPath":"/inbox","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"HelpFAQPage","type":"COMMUNITY","urlPath":"/help","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"IdeaMessagePage","type":"IDEA_POST","urlPath":"/idea/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"IdeaViewAllIdeasPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/all-ideas/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"LoginPage","type":"USER","urlPath":"/signin","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"BlogPostPage","type":"BLOG","urlPath":"/category/:categoryId/blogs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"UserBlog
Permissions.Page","type":"COMMUNITY","urlPath":"/c/user-blog-permissions/page","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ThemeEditorPage","type":"COMMUNITY","urlPath":"/designer/themes","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TkbViewAllArticlesPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId/all-articles/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730819800000,"localOverride":null,"page":{"id":"AllEvents","type":"CUSTOM","urlPath":"/Events","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"OccasionEditPage","type":"EVENT","urlPath":"/event/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"OAuthAuthorizationAllowPage","type":"USER","urlPath":"/auth/authorize/allow","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"PageEditorPage","type":"COMMUNITY","urlPath":"/designer/pages","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"PostPage","type":"COMMUNITY","urlPath":"/category/:categoryId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ForumBoardPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TkbBoardPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"EventPostPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"UserBadgesPage","type":"COMMUNITY","urlPath":"/users/:login/:userId/badges","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"GroupHubMembershipAction","type":"GROUP_HUB","urlPath":"/membership/join/:nodeId/:membershipType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"MaintenancePage","type":"COMMUNITY","urlPath":"/maintenance","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"IdeaReplyPage","type":"IDEA_REPLY","urlPath":"/idea/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"UserSettingsPage","type":"USER","urlPath":"/mysettings/:userSettingsTab","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"GroupHubsPage","type":"GROUP_HUB","urlPath":"/groups","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ForumPostPage","type":"FORUM","urlPath":"/cat
egory/:categoryId/discussions/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"OccasionRsvpActionPage","type":"OCCASION","urlPath":"/event/:boardId/:messageSubject/:messageId/rsvp/:responseType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"VerifyUserEmailPage","type":"USER","urlPath":"/verifyemail/:userId/:verifyEmailToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"AllOccasionsPage","type":"OCCASION","urlPath":"/category/:categoryId/events/:boardId/all-events/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"EventBoardPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TkbReplyPage","type":"TKB_REPLY","urlPath":"/kb/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"IdeaBoardPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"CommunityGuideLinesPage","type":"COMMUNITY","urlPath":"/communityguidelines","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"CaseCreatePage","type":"SALESFORCE_CASE_CREATION","urlPath":"/caseportal/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TkbEditPage","type":"TKB","urlPath":"/kb/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ForgotPasswordPage","type":"USER","urlPath":"/forgotpassword","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"IdeaEditPage","type":"IDEA","urlPath":"/idea/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TagPage","type":"COMMUNITY","urlPath":"/tag/:tagName","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"BlogBoardPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"OccasionMessagePage","type":"OCCASION_TOPIC","urlPath":"/event/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ManageContentPage","type":"COMMUNITY","urlPath":"/managecontent","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ClosedMembershipNodeNonMembersPage","type":"GROUP_HUB","urlPath":"/closedgroup/:groupHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOv
erride":null,"page":{"id":"CommunityPage","type":"COMMUNITY","urlPath":"/","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ForumMessagePage","type":"FORUM_TOPIC","urlPath":"/discussions/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"IdeaPostPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730819800000,"localOverride":null,"page":{"id":"CommunityHub.Page","type":"CUSTOM","urlPath":"/Directory","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"BlogMessagePage","type":"BLOG_ARTICLE","urlPath":"/blog/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"RegistrationPage","type":"USER","urlPath":"/register","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"EditGroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ForumEditPage","type":"FORUM","urlPath":"/discussions/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ResetPasswordPage","type":"USER","urlPath":"/resetpassword/:userId/:resetPasswordToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730819800000,"localOverride":null,"page":{"id":"AllBlogs.Page","type":"CUSTOM","urlPath":"/blogs","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TkbMessagePage","type":"TKB_ARTICLE","urlPath":"/kb/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"BlogEditPage","type":"BLOG","urlPath":"/blog/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ManageUsersPage","type":"USER","urlPath":"/users/manage/:tab?/:manageUsersTab?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ForumReplyPage","type":"FORUM_REPLY","urlPath":"/discussions/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"PrivacyPolicyPage","type":"COMMUNITY","urlPath":"/privacypolicy","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"NotificationPage","type":"COMMUNITY","urlPath":"/notifications","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"UserPage","type":"USER","urlPath":"/users/:login/:userId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"HealthCheckPage","type":"COMMUNITY","urlPath":"/he
alth","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"OccasionReplyPage","type":"OCCASION_REPLY","urlPath":"/event/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ManageMembersPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/manage/:tab?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"SearchResultsPage","type":"COMMUNITY","urlPath":"/search","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"BlogReplyPage","type":"BLOG_REPLY","urlPath":"/blog/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"GroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TermsOfServicePage","type":"COMMUNITY","urlPath":"/termsofservice","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"CategoryPage","type":"CATEGORY","urlPath":"/category/:categoryId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"ForumViewAllTopicsPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId/all-topics/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"TkbPostPage","type":"TKB","urlPath":"/category/:categoryId/kbs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1746561118944,"localOverride":null,"page":{"id":"GroupHubPostPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"}],"localOverride":false},"CachedAsset:text:en_US-components/context/AppContext/AppContextProvider-0":{"__typename":"CachedAsset","id":"text:en_US-components/context/AppContext/AppContextProvider-0","value":{"noCommunity":"Cannot find community","noUser":"Cannot find current user","noNode":"Cannot find node with id {nodeId}","noMessage":"Cannot find message with id {messageId}","userBanned":"We're sorry, but you have been banned from using this site.","userBannedReason":"You have been banned for the following reason: {reason}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-0":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-0","value":{"title":"Loading..."},"localOverride":false},"CachedAsset:theme:customTheme1-1746561118375":{"__typename":"CachedAsset","id":"theme:customTheme1-1746561118375","value":{"id":"customTheme1","animation":{"fast":"150ms","normal":"250ms","slow":"500ms","slowest":"750ms","function":"cubic-bezier(0.07, 0.91, 0.51, 
1)","__typename":"AnimationThemeSettings"},"avatar":{"borderRadius":"50%","collections":["default"],"__typename":"AvatarThemeSettings"},"basics":{"browserIcon":{"imageAssetName":"favicon-1730836283320.png","imageLastModified":"1730836286415","__typename":"ThemeAsset"},"customerLogo":{"imageAssetName":"favicon-1730836271365.png","imageLastModified":"1730836274203","__typename":"ThemeAsset"},"maximumWidthOfPageContent":"1300px","oneColumnNarrowWidth":"800px","gridGutterWidthMd":"30px","gridGutterWidthXs":"10px","pageWidthStyle":"WIDTH_OF_BROWSER","__typename":"BasicsThemeSettings"},"buttons":{"borderRadiusSm":"3px","borderRadius":"3px","borderRadiusLg":"5px","paddingY":"5px","paddingYLg":"7px","paddingYHero":"var(--lia-bs-btn-padding-y-lg)","paddingX":"12px","paddingXLg":"16px","paddingXHero":"60px","fontStyle":"NORMAL","fontWeight":"700","textTransform":"NONE","disabledOpacity":0.5,"primaryTextColor":"var(--lia-bs-white)","primaryTextHoverColor":"var(--lia-bs-white)","primaryTextActiveColor":"var(--lia-bs-white)","primaryBgColor":"var(--lia-bs-primary)","primaryBgHoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.85))","primaryBgActiveColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.7))","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid transparent","primaryBorderActive":"1px solid transparent","primaryBorderFocus":"1px solid var(--lia-bs-white)","primaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","secondaryTextColor":"var(--lia-bs-gray-900)","secondaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","secondaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.9))","secondaryBgColor":"var(--lia-bs-gray-200)","secondaryBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.96))","secondaryBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","secondaryBorder":"1px solid transparent","secondaryBorderHover":"1px solid transparent","secondaryBorderActive":"1px solid transparent","secondaryBorderFocus":"1px solid transparent","secondaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","tertiaryTextColor":"var(--lia-bs-gray-900)","tertiaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","tertiaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.9))","tertiaryBgColor":"transparent","tertiaryBgHoverColor":"transparent","tertiaryBgActiveColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.04)","tertiaryBorder":"1px solid transparent","tertiaryBorderHover":"1px solid hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","tertiaryBorderActive":"1px solid transparent","tertiaryBorderFocus":"1px solid transparent","tertiaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","destructiveTextColor":"var(--lia-bs-danger)","destructiveTextHoverColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), 
calc(var(--lia-bs-danger-l) * 0.95))","destructiveTextActiveColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), calc(var(--lia-bs-danger-l) * 0.9))","destructiveBgColor":"var(--lia-bs-gray-200)","destructiveBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.96))","destructiveBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","destructiveBorder":"1px solid transparent","destructiveBorderHover":"1px solid transparent","destructiveBorderActive":"1px solid transparent","destructiveBorderFocus":"1px solid transparent","destructiveBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","__typename":"ButtonsThemeSettings"},"border":{"color":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","mainContent":"NONE","sideContent":"LIGHT","radiusSm":"3px","radius":"5px","radiusLg":"9px","radius50":"100vw","__typename":"BorderThemeSettings"},"boxShadow":{"xs":"0 0 0 1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.08), 0 3px 0 -1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.16)","sm":"0 2px 4px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.12)","md":"0 5px 15px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.3)","lg":"0 10px 30px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.3)","__typename":"BoxShadowThemeSettings"},"cards":{"bgColor":"var(--lia-panel-bg-color)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":"var(--lia-box-shadow-xs)","__typename":"CardsThemeSettings"},"chip":{"maxWidth":"300px","height":"30px","__typename":"ChipThemeSettings"},"coreTypes":{"defaultMessageLinkColor":"var(--lia-bs-link-color)","defaultMessageLinkDecoration":"none","defaultMessageLinkFontStyle":"NORMAL","defaultMessageLinkFontWeight":"400","defaultMessageFontStyle":"NORMAL","defaultMessageFontWeight":"400","defaultMessageFontFamily":"var(--lia-bs-font-family-base)","forumColor":"#4099E2","forumFontFamily":"var(--lia-bs-font-family-base)","forumFontWeight":"var(--lia-default-message-font-weight)","forumLineHeight":"var(--lia-bs-line-height-base)","forumFontStyle":"var(--lia-default-message-font-style)","forumMessageLinkColor":"var(--lia-default-message-link-color)","forumMessageLinkDecoration":"var(--lia-default-message-link-decoration)","forumMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","forumMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","forumSolvedColor":"#148563","blogColor":"#1CBAA0","blogFontFamily":"var(--lia-bs-font-family-base)","blogFontWeight":"var(--lia-default-message-font-weight)","blogLineHeight":"1.75","blogFontStyle":"var(--lia-default-message-font-style)","blogMessageLinkColor":"var(--lia-default-message-link-color)","blogMessageLinkDecoration":"var(--lia-default-message-link-decoration)","blogMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","blogMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","tkbColor":"#4C6B90","tkbFontFamily":"var(--lia-bs-font-family-base)","tkbFontWeight":"var(--lia-default-message-font-weight)","tkbLineHeight":"1.75","tkbFontStyle":"var(--lia-default-message-font-style)","tkbMessageLinkColor":"var(--lia-default-message-link-color)","tkbMessageLinkDecorat
ion":"var(--lia-default-message-link-decoration)","tkbMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","tkbMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaColor":"#4099E2","qandaFontFamily":"var(--lia-bs-font-family-base)","qandaFontWeight":"var(--lia-default-message-font-weight)","qandaLineHeight":"var(--lia-bs-line-height-base)","qandaFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkColor":"var(--lia-default-message-link-color)","qandaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","qandaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaSolvedColor":"#3FA023","ideaColor":"#FF8000","ideaFontFamily":"var(--lia-bs-font-family-base)","ideaFontWeight":"var(--lia-default-message-font-weight)","ideaLineHeight":"var(--lia-bs-line-height-base)","ideaFontStyle":"var(--lia-default-message-font-style)","ideaMessageLinkColor":"var(--lia-default-message-link-color)","ideaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","ideaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","ideaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","contestColor":"#FCC845","contestFontFamily":"var(--lia-bs-font-family-base)","contestFontWeight":"var(--lia-default-message-font-weight)","contestLineHeight":"var(--lia-bs-line-height-base)","contestFontStyle":"var(--lia-default-message-link-font-style)","contestMessageLinkColor":"var(--lia-default-message-link-color)","contestMessageLinkDecoration":"var(--lia-default-message-link-decoration)","contestMessageLinkFontStyle":"ITALIC","contestMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","occasionColor":"#D13A1F","occasionFontFamily":"var(--lia-bs-font-family-base)","occasionFontWeight":"var(--lia-default-message-font-weight)","occasionLineHeight":"var(--lia-bs-line-height-base)","occasionFontStyle":"var(--lia-default-message-font-style)","occasionMessageLinkColor":"var(--lia-default-message-link-color)","occasionMessageLinkDecoration":"var(--lia-default-message-link-decoration)","occasionMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","occasionMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","grouphubColor":"#333333","categoryColor":"#949494","communityColor":"#FFFFFF","productColor":"#949494","__typename":"CoreTypesThemeSettings"},"colors":{"black":"#000000","white":"#FFFFFF","gray100":"#F7F7F7","gray200":"#F7F7F7","gray300":"#E8E8E8","gray400":"#D9D9D9","gray500":"#CCCCCC","gray600":"#717171","gray700":"#707070","gray800":"#545454","gray900":"#333333","dark":"#545454","light":"#F7F7F7","primary":"#0069D4","secondary":"#333333","bodyText":"#1E1E1E","bodyBg":"#FFFFFF","info":"#409AE2","success":"#41C5AE","warning":"#FCC844","danger":"#BC341B","alertSystem":"#FF6600","textMuted":"#707070","highlight":"#FFFCAD","outline":"var(--lia-bs-primary)","custom":["#D3F5A4","#243A5E"],"__typename":"ColorsThemeSettings"},"divider":{"size":"3px","marginLeft":"4px","marginRight":"4px","borderRadius":"50%","bgColor":"var(--lia-bs-gray-600)","bgColorActive":"var(--lia-bs-gray-600)","__typename":"DividerThemeSettings"},"dropdown":{"fontSize":"var(--lia-bs-font-size-sm)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius-sm)","dividerBg":"var(--lia-bs-gray-300)","itemPaddingY":"5px","itemPaddingX":"20px","headerColor":"var(--lia-bs-gray-700)","__typename":"DropdownTh
emeSettings"},"email":{"link":{"color":"#0069D4","hoverColor":"#0061c2","decoration":"none","hoverDecoration":"underline","__typename":"EmailLinkSettings"},"border":{"color":"#e4e4e4","__typename":"EmailBorderSettings"},"buttons":{"borderRadiusLg":"5px","paddingXLg":"16px","paddingYLg":"7px","fontWeight":"700","primaryTextColor":"#ffffff","primaryTextHoverColor":"#ffffff","primaryBgColor":"#0069D4","primaryBgHoverColor":"#005cb8","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid transparent","__typename":"EmailButtonsSettings"},"panel":{"borderRadius":"5px","borderColor":"#e4e4e4","__typename":"EmailPanelSettings"},"__typename":"EmailThemeSettings"},"emoji":{"skinToneDefault":"#ffcd43","skinToneLight":"#fae3c5","skinToneMediumLight":"#e2cfa5","skinToneMedium":"#daa478","skinToneMediumDark":"#a78058","skinToneDark":"#5e4d43","__typename":"EmojiThemeSettings"},"heading":{"color":"var(--lia-bs-body-color)","fontFamily":"Segoe UI","fontStyle":"NORMAL","fontWeight":"400","h1FontSize":"34px","h2FontSize":"32px","h3FontSize":"28px","h4FontSize":"24px","h5FontSize":"20px","h6FontSize":"16px","lineHeight":"1.3","subHeaderFontSize":"11px","subHeaderFontWeight":"500","h1LetterSpacing":"normal","h2LetterSpacing":"normal","h3LetterSpacing":"normal","h4LetterSpacing":"normal","h5LetterSpacing":"normal","h6LetterSpacing":"normal","subHeaderLetterSpacing":"2px","h1FontWeight":"var(--lia-bs-headings-font-weight)","h2FontWeight":"var(--lia-bs-headings-font-weight)","h3FontWeight":"var(--lia-bs-headings-font-weight)","h4FontWeight":"var(--lia-bs-headings-font-weight)","h5FontWeight":"var(--lia-bs-headings-font-weight)","h6FontWeight":"var(--lia-bs-headings-font-weight)","__typename":"HeadingThemeSettings"},"icons":{"size10":"10px","size12":"12px","size14":"14px","size16":"16px","size20":"20px","size24":"24px","size30":"30px","size40":"40px","size50":"50px","size60":"60px","size80":"80px","size120":"120px","size160":"160px","__typename":"IconsThemeSettings"},"imagePreview":{"bgColor":"var(--lia-bs-gray-900)","titleColor":"var(--lia-bs-white)","controlColor":"var(--lia-bs-white)","controlBgColor":"var(--lia-bs-gray-800)","__typename":"ImagePreviewThemeSettings"},"input":{"borderColor":"var(--lia-bs-gray-600)","disabledColor":"var(--lia-bs-gray-600)","focusBorderColor":"var(--lia-bs-primary)","labelMarginBottom":"10px","btnFontSize":"var(--lia-bs-font-size-sm)","focusBoxShadow":"0 0 0 3px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","checkLabelMarginBottom":"2px","checkboxBorderRadius":"3px","borderRadiusSm":"var(--lia-bs-border-radius-sm)","borderRadius":"var(--lia-bs-border-radius)","borderRadiusLg":"var(--lia-bs-border-radius-lg)","formTextMarginTop":"4px","textAreaBorderRadius":"var(--lia-bs-border-radius)","activeFillColor":"var(--lia-bs-primary)","__typename":"InputThemeSettings"},"loading":{"dotDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.2)","dotLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.5)","barDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.06)","barLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.4)","__typename":"LoadingThemeSettings"},"link":{"color":"var(--lia-bs-primary)","hoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) - 
10%))","decoration":"none","hoverDecoration":"underline","__typename":"LinkThemeSettings"},"listGroup":{"itemPaddingY":"15px","itemPaddingX":"15px","borderColor":"var(--lia-bs-gray-300)","__typename":"ListGroupThemeSettings"},"modal":{"contentTextColor":"var(--lia-bs-body-color)","contentBg":"var(--lia-bs-white)","backgroundBg":"var(--lia-bs-black)","smSize":"440px","mdSize":"760px","lgSize":"1080px","backdropOpacity":0.3,"contentBoxShadowXs":"var(--lia-bs-box-shadow-sm)","contentBoxShadow":"var(--lia-bs-box-shadow)","headerFontWeight":"700","__typename":"ModalThemeSettings"},"navbar":{"position":"FIXED","background":{"attachment":null,"clip":null,"color":"var(--lia-bs-white)","imageAssetName":"","imageLastModified":"0","origin":null,"position":"CENTER_CENTER","repeat":"NO_REPEAT","size":"COVER","__typename":"BackgroundProps"},"backgroundOpacity":0.8,"paddingTop":"15px","paddingBottom":"15px","borderBottom":"1px solid var(--lia-bs-border-color)","boxShadow":"var(--lia-bs-box-shadow-sm)","brandMarginRight":"30px","brandMarginRightSm":"10px","brandLogoHeight":"30px","linkGap":"10px","linkJustifyContent":"flex-start","linkPaddingY":"5px","linkPaddingX":"10px","linkDropdownPaddingY":"9px","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkColor":"var(--lia-bs-body-color)","linkHoverColor":"var(--lia-bs-primary)","linkFontSize":"var(--lia-bs-font-size-sm)","linkFontStyle":"NORMAL","linkFontWeight":"400","linkTextTransform":"NONE","linkLetterSpacing":"normal","linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkBgColor":"transparent","linkBgHoverColor":"transparent","linkBorder":"none","linkBorderHover":"none","linkBoxShadow":"none","linkBoxShadowHover":"none","linkTextBorderBottom":"none","linkTextBorderBottomHover":"none","dropdownPaddingTop":"10px","dropdownPaddingBottom":"15px","dropdownPaddingX":"10px","dropdownMenuOffset":"2px","dropdownDividerMarginTop":"10px","dropdownDividerMarginBottom":"10px","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.1)","controllerIconColor":"var(--lia-bs-body-color)","controllerIconHoverColor":"var(--lia-bs-body-color)","controllerTextColor":"var(--lia-nav-controller-icon-color)","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","controllerHighlightColor":"hsla(30, 100%, 50%)","controllerHighlightTextColor":"var(--lia-yiq-light)","controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerColor":"var(--lia-nav-controller-icon-color)","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","hamburgerBgColor":"transparent","hamburgerBgHoverColor":"transparent","hamburgerBorder":"none","hamburgerBorderHover":"none","collapseMenuMarginLeft":"20px","collapseMenuDividerBg":"var(--lia-nav-link-color)","collapseMenuDividerOpacity":0.16,"__typename":"NavbarThemeSettings"},"pager":{"textColor":"var(--lia-bs-link-color)","textFontWeight":"var(--lia-font-weight-md)","textFontSize":"var(--lia-bs-font-size-sm)","__typename":"PagerThemeSettings"},"panel":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-bs-border-radius)","borderColor":"var(--lia-bs-border-color)","boxShadow":"none","__typename":"PanelThemeSettings"},"popover":{"arrowHeight":"8px","arrowWidth":"16px","maxWidth":"300px","minWidth":"100px","headerBg":"var(--lia-bs-white)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius)","boxShadow":"0 0.5rem 1rem 
hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.15)","__typename":"PopoverThemeSettings"},"prism":{"color":"#000000","bgColor":"#f5f2f0","fontFamily":"var(--font-family-monospace)","fontSize":"var(--lia-bs-font-size-base)","fontWeightBold":"var(--lia-bs-font-weight-bold)","fontStyleItalic":"italic","tabSize":2,"highlightColor":"#b3d4fc","commentColor":"#62707e","punctuationColor":"#6f6f6f","namespaceOpacity":"0.7","propColor":"#990055","selectorColor":"#517a00","operatorColor":"#906736","operatorBgColor":"hsla(0, 0%, 100%, 0.5)","keywordColor":"#0076a9","functionColor":"#d3284b","variableColor":"#c14700","__typename":"PrismThemeSettings"},"rte":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":" var(--lia-panel-box-shadow)","customColor1":"#bfedd2","customColor2":"#fbeeb8","customColor3":"#f8cac6","customColor4":"#eccafa","customColor5":"#c2e0f4","customColor6":"#2dc26b","customColor7":"#f1c40f","customColor8":"#e03e2d","customColor9":"#b96ad9","customColor10":"#3598db","customColor11":"#169179","customColor12":"#e67e23","customColor13":"#ba372a","customColor14":"#843fa1","customColor15":"#236fa1","customColor16":"#ecf0f1","customColor17":"#ced4d9","customColor18":"#95a5a6","customColor19":"#7e8c8d","customColor20":"#34495e","customColor21":"#000000","customColor22":"#ffffff","defaultMessageHeaderMarginTop":"40px","defaultMessageHeaderMarginBottom":"20px","defaultMessageItemMarginTop":"0","defaultMessageItemMarginBottom":"10px","diffAddedColor":"hsla(170, 53%, 51%, 0.4)","diffChangedColor":"hsla(43, 97%, 63%, 0.4)","diffNoneColor":"hsla(0, 0%, 80%, 0.4)","diffRemovedColor":"hsla(9, 74%, 47%, 0.4)","specialMessageHeaderMarginTop":"40px","specialMessageHeaderMarginBottom":"20px","specialMessageItemMarginTop":"0","specialMessageItemMarginBottom":"10px","__typename":"RteThemeSettings"},"tags":{"bgColor":"var(--lia-bs-gray-200)","bgHoverColor":"var(--lia-bs-gray-400)","borderRadius":"var(--lia-bs-border-radius-sm)","color":"var(--lia-bs-body-color)","hoverColor":"var(--lia-bs-body-color)","fontWeight":"var(--lia-font-weight-md)","fontSize":"var(--lia-font-size-xxs)","textTransform":"UPPERCASE","letterSpacing":"0.5px","__typename":"TagsThemeSettings"},"toasts":{"borderRadius":"var(--lia-bs-border-radius)","paddingX":"12px","__typename":"ToastsThemeSettings"},"typography":{"fontFamilyBase":"Segoe UI","fontStyleBase":"NORMAL","fontWeightBase":"400","fontWeightLight":"300","fontWeightNormal":"400","fontWeightMd":"500","fontWeightBold":"700","letterSpacingSm":"normal","letterSpacingXs":"normal","lineHeightBase":"1.5","fontSizeBase":"16px","fontSizeXxs":"11px","fontSizeXs":"12px","fontSizeSm":"14px","fontSizeLg":"20px","fontSizeXl":"24px","smallFontSize":"14px","customFonts":[{"source":"SERVER","name":"Segoe UI","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"},{"style":"NORMAL","weight":"300","__typename":"FontStyleData"},{"style":"NORMAL","weight":"600","__typename":"FontStyleData"},{"style":"NORMAL","weight":"700","__typename":"FontStyleData"},{"style":"ITALIC","weight":"400","__typename":"FontStyleData"}],"assetNames":["SegoeUI-normal-400.woff2","SegoeUI-normal-300.woff2","SegoeUI-normal-600.woff2","SegoeUI-normal-700.woff2","SegoeUI-italic-400.woff2"],"__typename":"CustomFont"},{"source":"SERVER","name":"MWF Fluent 
Icons","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"}],"assetNames":["MWFFluentIcons-normal-400.woff2"],"__typename":"CustomFont"}],"__typename":"TypographyThemeSettings"},"unstyledListItem":{"marginBottomSm":"5px","marginBottomMd":"10px","marginBottomLg":"15px","marginBottomXl":"20px","marginBottomXxl":"25px","__typename":"UnstyledListItemThemeSettings"},"yiq":{"light":"#ffffff","dark":"#000000","__typename":"YiqThemeSettings"},"colorLightness":{"primaryDark":0.36,"primaryLight":0.74,"primaryLighter":0.89,"primaryLightest":0.95,"infoDark":0.39,"infoLight":0.72,"infoLighter":0.85,"infoLightest":0.93,"successDark":0.24,"successLight":0.62,"successLighter":0.8,"successLightest":0.91,"warningDark":0.39,"warningLight":0.68,"warningLighter":0.84,"warningLightest":0.93,"dangerDark":0.41,"dangerLight":0.72,"dangerLighter":0.89,"dangerLightest":0.95,"__typename":"ColorLightnessThemeSettings"},"localOverride":false,"__typename":"Theme"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-1745505307000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-1745505307000","value":{"title":"Loading..."},"localOverride":false},"CachedAsset:text:en_US-components/common/EmailVerification-1745505307000":{"__typename":"CachedAsset","id":"text:en_US-components/common/EmailVerification-1745505307000","value":{"email.verification.title":"Email Verification Required","email.verification.message.update.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. To change your email, visit My Settings.","email.verification.message.resend.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. 
Resend email."},"localOverride":false},"CachedAsset:text:en_US-pages/tags/TagPage-1745505307000":{"__typename":"CachedAsset","id":"text:en_US-pages/tags/TagPage-1745505307000","value":{"tagPageTitle":"Tag:\"{tagName}\" | {communityTitle}","tagPageForNodeTitle":"Tag:\"{tagName}\" in \"{title}\" | {communityTitle}","name":"Tags Page","tag":"Tag: {tagName}"},"localOverride":false},"Category:category:FastTrack":{"__typename":"Category","id":"category:FastTrack","entityType":"CATEGORY","displayId":"FastTrack","nodeType":"category","depth":3,"title":"Microsoft FastTrack","shortTitle":"Microsoft FastTrack","parent":{"__ref":"Category:category:products-services"}},"Category:category:top":{"__typename":"Category","id":"category:top","displayId":"top","nodeType":"category","depth":0,"title":"Top"},"Category:category:communities":{"__typename":"Category","id":"category:communities","displayId":"communities","nodeType":"category","depth":1,"parent":{"__ref":"Category:category:top"},"title":"Communities"},"Category:category:products-services":{"__typename":"Category","id":"category:products-services","displayId":"products-services","nodeType":"category","depth":2,"parent":{"__ref":"Category:category:communities"},"title":"Products"},"Blog:board:FastTrackforAzureBlog":{"__typename":"Blog","id":"board:FastTrackforAzureBlog","entityType":"BLOG","displayId":"FastTrackforAzureBlog","nodeType":"board","depth":4,"conversationStyle":"BLOG","title":"FastTrack for Azure","description":"","avatar":null,"profileSettings":{"__typename":"ProfileSettings","language":null},"parent":{"__ref":"Category:category:FastTrack"},"ancestors":{"__typename":"CoreNodeConnection","edges":[{"__typename":"CoreNodeEdge","node":{"__ref":"Community:community:gxcuf89792"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:communities"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:products-services"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:FastTrack"}}]},"userContext":{"__typename":"NodeUserContext","canAddAttachments":false,"canUpdateNode":false,"canPostMessages":false,"isSubscribed":false},"boardPolicies":{"__typename":"BoardPolicies","canPublishArticleOnCreate":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.forums.policy_can_publish_on_create_workflow_action.accessDenied","key":"error.lithium.policies.forums.policy_can_publish_on_create_workflow_action.accessDenied","args":[]}}},"theme":{"__ref":"Theme:customTheme1"},"shortTitle":"FastTrack for 
Azure","tagPolicies":{"__typename":"TagPolicies","canSubscribeTagOnNode":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.labels.action.corenode.subscribe_labels.allow.accessDenied","key":"error.lithium.policies.labels.action.corenode.subscribe_labels.allow.accessDenied","args":[]}},"canManageTagDashboard":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.labels.action.corenode.admin_labels.allow.accessDenied","key":"error.lithium.policies.labels.action.corenode.admin_labels.allow.accessDenied","args":[]}}}},"CachedAsset:quilt:o365.prod:pages/tags/TagPage:board:FastTrackforAzureBlog-1746740537097":{"__typename":"CachedAsset","id":"quilt:o365.prod:pages/tags/TagPage:board:FastTrackforAzureBlog-1746740537097","value":{"id":"TagPage","container":{"id":"Common","headerProps":{"removeComponents":["community.widget.bannerWidget"],"__typename":"QuiltContainerSectionProps"},"items":[{"id":"tag-header-widget","layout":"ONE_COLUMN","bgColor":"var(--lia-bs-white)","showBorder":"BOTTOM","sectionEditLevel":"LOCKED","columnMap":{"main":[{"id":"tags.widget.TagsHeaderWidget","__typename":"QuiltComponent"}],"__typename":"OneSectionColumns"},"__typename":"OneColumnQuiltSection"},{"id":"messages-list-for-tag-widget","layout":"ONE_COLUMN","columnMap":{"main":[{"id":"messages.widget.messageListForNodeByRecentActivityWidget","props":{"viewVariant":{"type":"inline","props":{"useUnreadCount":true,"useViewCount":true,"useAuthorLogin":true,"clampBodyLines":3,"useAvatar":true,"useBoardIcon":false,"useKudosCount":true,"usePreviewMedia":true,"useTags":false,"useNode":true,"useNodeLink":true,"useTextBody":true,"truncateBodyLength":-1,"useBody":true,"useRepliesCount":true,"useSolvedBadge":true,"timeStampType":"conversation.lastPostingActivityTime","useMessageTimeLink":true,"clampSubjectLines":2}},"panelType":"divider","useTitle":false,"hideIfEmpty":false,"pagerVariant":{"type":"loadMore"},"style":"list","showTabs":true,"tabItemMap":{"default":{"mostRecent":true,"mostRecentUserContent":false,"newest":false},"additional":{"mostKudoed":true,"mostViewed":true,"mostReplies":false,"noReplies":false,"noSolutions":false,"solutions":false}}},"__typename":"QuiltComponent"}],"__typename":"OneSectionColumns"},"__typename":"OneColumnQuiltSection"}],"__typename":"QuiltContainer"},"__typename":"Quilt"},"localOverride":false},"CachedAsset:quiltWrapper:o365.prod:Common:1746797692761":{"__typename":"CachedAsset","id":"quiltWrapper:o365.prod:Common:1746797692761","value":{"id":"Common","header":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"community.widget.navbarWidget","props":{"showUserName":true,"showRegisterLink":true,"useIconLanguagePicker":true,"useLabelLanguagePicker":true,"className":"QuiltComponent_lia-component-edit-mode__0nCcm","links":{"sideLinks":[],"mainLinks":[{"children":[],"linkType":"INTERNAL","id":"gxcuf89792","params":{},"routeName":"CommunityPage"},{"children":[],"linkType":"EXTERNAL","id":"external-link","url":"/Directory","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft365","params":{"categoryId":"microsoft365"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows","params":{"categoryId":"Windows"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"Common-microsoft365-copi
lot-link","params":{"categoryId":"Microsoft365Copilot"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-teams","params":{"categoryId":"MicrosoftTeams"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-securityand-compliance","params":{"categoryId":"microsoft-security"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"azure","params":{"categoryId":"Azure"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"Common-content_management-link","params":{"categoryId":"Content_Management"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"exchange","params":{"categoryId":"Exchange"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows-server","params":{"categoryId":"Windows-Server"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"outlook","params":{"categoryId":"Outlook"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-endpoint-manager","params":{"categoryId":"microsoftintune"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-2","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities","url":"/","target":"BLANK"},{"children":[{"linkType":"INTERNAL","id":"a-i","params":{"categoryId":"AI"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"education-sector","params":{"categoryId":"EducationSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"partner-community","params":{"categoryId":"PartnerCommunity"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"i-t-ops-talk","params":{"categoryId":"ITOpsTalk"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"healthcare-and-life-sciences","params":{"categoryId":"HealthcareAndLifeSciences"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-mechanics","params":{"categoryId":"MicrosoftMechanics"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"public-sector","params":{"categoryId":"PublicSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"s-m-b","params":{"categoryId":"MicrosoftforNonprofits"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"io-t","params":{"categoryId":"IoT"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"startupsat-microsoft","params":{"categoryId":"StartupsatMicrosoft"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"driving-adoption","params":{"categoryId":"DrivingAdoption"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-1","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities-1","url":"/","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external","url":"/Blogs","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external-1","url":"/Events","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft-learn-1","params":{"categoryId":"MicrosoftLearn"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-learn-blog","params":{"boardId":"MicrosoftLearnBlog","categoryId":"MicrosoftLearn"},"routeName":"BlogBoardPage"},{"linkType":"EXTERNAL","id":"external-10","url":"https://learningroomdirectory.microsoft.com/","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-3","url":"https://docs.microsoft.com/learn/dynamics365/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-4","url":"https://docs.microsoft.com/learn/m365/?wt.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-5","url":"https://docs.microsoft.com/learn/topics/sci/?wt.mc_id=techcom_hea
der-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-6","url":"https://docs.microsoft.com/learn/powerplatform/?wt.mc_id=techcom_header-webpage-powerplatform","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-7","url":"https://docs.microsoft.com/learn/github/?wt.mc_id=techcom_header-webpage-github","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-8","url":"https://docs.microsoft.com/learn/teams/?wt.mc_id=techcom_header-webpage-teams","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-9","url":"https://docs.microsoft.com/learn/dotnet/?wt.mc_id=techcom_header-webpage-dotnet","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-2","url":"https://docs.microsoft.com/learn/azure/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"}],"linkType":"INTERNAL","id":"microsoft-learn","params":{"categoryId":"MicrosoftLearn"},"routeName":"CategoryPage"},{"children":[],"linkType":"INTERNAL","id":"community-info-center","params":{"categoryId":"Community-Info-Center"},"routeName":"CategoryPage"}]},"style":{"boxShadow":"var(--lia-bs-box-shadow-sm)","controllerHighlightColor":"hsla(30, 100%, 50%)","linkFontWeight":"400","dropdownDividerMarginBottom":"10px","hamburgerBorderHover":"none","linkBoxShadowHover":"none","linkFontSize":"14px","backgroundOpacity":0.8,"controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerBgColor":"transparent","hamburgerColor":"var(--lia-nav-controller-icon-color)","linkTextBorderBottom":"none","brandLogoHeight":"30px","linkBgHoverColor":"transparent","linkLetterSpacing":"normal","collapseMenuDividerOpacity":0.16,"dropdownPaddingBottom":"15px","paddingBottom":"15px","dropdownMenuOffset":"2px","hamburgerBgHoverColor":"transparent","borderBottom":"1px solid var(--lia-bs-border-color)","hamburgerBorder":"none","dropdownPaddingX":"10px","brandMarginRightSm":"10px","linkBoxShadow":"none","collapseMenuDividerBg":"var(--lia-nav-link-color)","linkColor":"var(--lia-bs-body-color)","linkJustifyContent":"flex-start","dropdownPaddingTop":"10px","controllerHighlightTextColor":"var(--lia-yiq-dark)","controllerTextColor":"var(--lia-nav-controller-icon-color)","background":{"imageAssetName":"","color":"var(--lia-bs-white)","size":"COVER","repeat":"NO_REPEAT","position":"CENTER_CENTER","imageLastModified":""},"linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkHoverColor":"var(--lia-bs-body-color)","position":"FIXED","linkBorder":"none","linkTextBorderBottomHover":"2px solid var(--lia-bs-body-color)","brandMarginRight":"30px","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","linkBorderHover":"none","collapseMenuMarginLeft":"20px","linkFontStyle":"NORMAL","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","linkPaddingX":"10px","linkPaddingY":"5px","paddingTop":"15px","linkTextTransform":"NONE","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 
0.1)","linkBgColor":"transparent","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkDropdownPaddingY":"9px","controllerIconColor":"var(--lia-bs-body-color)","dropdownDividerMarginTop":"10px","linkGap":"10px","controllerIconHoverColor":"var(--lia-bs-body-color)"},"showSearchIcon":false,"languagePickerStyle":"iconAndLabel"},"__typename":"QuiltComponent"},{"id":"community.widget.breadcrumbWidget","props":{"backgroundColor":"transparent","linkHighlightColor":"var(--lia-bs-primary)","visualEffects":{"showBottomBorder":true},"linkTextColor":"var(--lia-bs-gray-700)"},"__typename":"QuiltComponent"},{"id":"custom.widget.HeroBanner","props":{"widgetVisibility":"signedInOrAnonymous","usePageWidth":false,"useTitle":true,"cMax_items":3,"useBackground":false,"title":"","lazyLoad":false,"widgetChooser":"custom.widget.HeroBanner"},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"footer":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"custom.widget.MicrosoftFooter","props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"useBackground":false,"title":"","lazyLoad":false},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"__typename":"QuiltWrapper","localOverride":false},"localOverride":false},"CachedAsset:text:en_US-components/common/ActionFeedback-1745505307000":{"__typename":"CachedAsset","id":"text:en_US-components/common/ActionFeedback-1745505307000","value":{"joinedGroupHub.title":"Welcome","joinedGroupHub.message":"You are now a member of this group and are subscribed to updates.","groupHubInviteNotFound.title":"Invitation Not Found","groupHubInviteNotFound.message":"Sorry, we could not find your invitation to the group. The owner may have canceled the invite.","groupHubNotFound.title":"Group Not Found","groupHubNotFound.message":"The grouphub you tried to join does not exist. It may have been deleted.","existingGroupHubMember.title":"Already Joined","existingGroupHubMember.message":"You are already a member of this group.","accountLocked.title":"Account Locked","accountLocked.message":"Your account has been locked due to multiple failed attempts. Try again in {lockoutTime} minutes.","editedGroupHub.title":"Changes Saved","editedGroupHub.message":"Your group has been updated.","leftGroupHub.title":"Goodbye","leftGroupHub.message":"You are no longer a member of this group and will not receive future updates.","deletedGroupHub.title":"Deleted","deletedGroupHub.message":"The group has been deleted.","groupHubCreated.title":"Group Created","groupHubCreated.message":"{groupHubName} is ready to use","accountClosed.title":"Account Closed","accountClosed.message":"The account has been closed and you will now be redirected to the homepage","resetTokenExpired.title":"Reset Password Link has Expired","resetTokenExpired.message":"Try resetting your password again","invalidUrl.title":"Invalid URL","invalidUrl.message":"The URL you're using is not recognized. Verify your URL and try again.","accountClosedForUser.title":"Account Closed","accountClosedForUser.message":"{userName}'s account is closed","inviteTokenInvalid.title":"Invitation Invalid","inviteTokenInvalid.message":"Your invitation to the community has been canceled or expired.","inviteTokenError.title":"Invitation Verification Failed","inviteTokenError.message":"The url you are utilizing is not recognized. 
Verify your URL and try again","pageNotFound.title":"Access Denied","pageNotFound.message":"You do not have access to this area of the community or it doesn't exist","eventAttending.title":"Responded as Attending","eventAttending.message":"You'll be notified when there's new activity and reminded as the event approaches","eventInterested.title":"Responded as Interested","eventInterested.message":"You'll be notified when there's new activity and reminded as the event approaches","eventNotFound.title":"Event Not Found","eventNotFound.message":"The event you tried to respond to does not exist.","redirectToRelatedPage.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.message":"The content you are trying to access is archived","redirectToRelatedPage.message":"The content you are trying to access is archived","relatedUrl.archivalLink.flyoutMessage":"The content you are trying to access is archived View Archived Content"},"localOverride":false},"CachedAsset:component:custom.widget.HeroBanner-en-us-1746740527728":{"__typename":"CachedAsset","id":"component:custom.widget.HeroBanner-en-us-1746740527728","value":{"component":{"id":"custom.widget.HeroBanner","template":{"id":"HeroBanner","markupLanguage":"REACT","style":null,"texts":{"searchPlaceholderText":"Search this community","followActionText":"Follow","unfollowActionText":"Following","searchOnHoverText":"Please enter your search term(s) and then press return key to complete a search.","blogs.sidebar.pagetitle":"Latest Blogs | Microsoft Tech Community","followThisNode":"Follow this node","unfollowThisNode":"Unfollow this node"},"defaults":{"config":{"applicablePages":[],"description":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.HeroBanner","form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The 
maximum number of items to display in the carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"__typename":"Component","localOverride":false},"globalCss":null,"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation"
:null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"}},"localOverride":false},"CachedAsset:component:custom.widget.MicrosoftFooter-en-us-1746740527728":{"__typename":"CachedAsset","id":"component:custom.widget.MicrosoftFooter-en-us-1746740527728","value":{"component":{"id":"custom.widget.MicrosoftFooter","template":{"id":"MicrosoftFooter","markupLanguage":"HANDLEBARS","style":".
context-uhf {\n min-width: 280px;\n font-size: 15px;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & *:after {\n box-sizing: inherit;\n }\n a.c-uhff-link {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n &a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.c-uhff {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.c-uhff-nav {\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n .c-heading-4 {\n color: #616161;\n word-break: break-word;\n font-size: 15px;\n line-height: 20px;\n padding: 36px 0 4px;\n font-weight: 600;\n }\n .c-uhff-nav-row {\n .c-uhff-nav-group {\n display: block;\n float: left;\n min-height: 1px;\n vertical-align: text-top;\n padding: 0 12px;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.c-list.f-bare {\n font-size: 11px;\n line-height: 16px;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 8px 0;\n margin: 0;\n }\n }\n }\n }\n}\n.c-uhff-base {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 30px 5% 16px;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.c-uhff-ccpa {\n font-size: 11px;\n line-height: 16px;\n float: left;\n margin: 3px 0;\n }\n a.c-uhff-ccpa:hover {\n text-decoration: underline;\n }\n ul.c-list {\n font-size: 11px;\n line-height: 16px;\n float: right;\n margin: 3px 0;\n color: #616161;\n li {\n padding: 0 24px 4px 0;\n display: inline-block;\n }\n }\n .c-list.f-bare {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 30px 24px 16px;\n }\n}\n\n.social-share {\n position: fixed;\n top: 60%;\n transform: translateY(-50%);\n left: 0;\n z-index: 1000;\n}\n\n.sharing-options {\n list-style: none;\n padding: 0;\n margin: 0;\n display: block;\n flex-direction: column;\n background-color: white;\n width: 43px;\n border-radius: 0px 7px 7px 0px;\n}\n.linkedin-icon {\n border-top-right-radius: 7px;\n}\n.linkedin-icon:hover {\n border-radius: 0;\n}\n.social-share-rss-image {\n border-bottom-right-radius: 7px;\n}\n.social-share-rss-image:hover {\n border-radius: 0;\n}\n\n.social-link-footer {\n position: relative;\n display: block;\n margin: -2px 0;\n transition: all 0.2s ease;\n}\n.social-link-footer:hover .linkedin-icon {\n border-radius: 0;\n}\n.social-link-footer:hover .social-share-rss-image {\n border-radius: 0;\n}\n\n.social-link-footer img {\n width: 40px;\n height: auto;\n transition: filter 0.3s ease;\n}\n\n.social-share-list {\n width: 40px;\n}\n.social-share-rss-image {\n width: 40px;\n}\n\n.share-icon {\n border: 2px solid transparent;\n display: inline-block;\n position: relative;\n}\n\n.share-icon:hover {\n opacity: 1;\n border: 2px solid white;\n box-sizing: 
border-box;\n}\n\n.share-icon:hover .label {\n opacity: 1;\n visibility: visible;\n border: 2px solid white;\n box-sizing: border-box;\n border-left: none;\n}\n\n.label {\n position: absolute;\n left: 100%;\n white-space: nowrap;\n opacity: 0;\n visibility: hidden;\n transition: all 0.2s ease;\n color: white;\n border-radius: 0 10 0 10px;\n top: 50%;\n transform: translateY(-50%);\n height: 40px;\n border-radius: 0 6px 6px 0;\n display: flex;\n align-items: center;\n justify-content: center;\n padding: 20px 5px 20px 8px;\n margin-left: -1px;\n}\n.linkedin {\n background-color: #0474b4;\n}\n.facebook {\n background-color: #3c5c9c;\n}\n.twitter {\n background-color: white;\n color: black;\n}\n.reddit {\n background-color: #fc4404;\n}\n.mail {\n background-color: #848484;\n}\n.bluesky {\n background-color: white;\n color: black;\n}\n.rss {\n background-color: #ec7b1c;\n}\n#RSS {\n width: 40px;\n height: 40px;\n}\n\n@media (max-width: 991px) {\n .social-share {\n display: none;\n }\n}\n","texts":{"New tab":"What's New","New 1":"Surface Laptop Studio 2","New 2":"Surface Laptop Go 3","New 3":"Surface Pro 9","New 4":"Surface Laptop 5","New 5":"Surface Studio 2+","New 6":"Copilot in Windows","New 7":"Microsoft 365","New 8":"Windows 11 apps","Store tab":"Microsoft Store","Store 1":"Account Profile","Store 2":"Download Center","Store 3":"Microsoft Store Support","Store 4":"Returns","Store 5":"Order tracking","Store 6":"Certified Refurbished","Store 7":"Microsoft Store Promise","Store 8":"Flexible Payments","Education tab":"Education","Edu 1":"Microsoft in education","Edu 2":"Devices for education","Edu 3":"Microsoft Teams for Education","Edu 4":"Microsoft 365 Education","Edu 5":"How to buy for your school","Edu 6":"Educator Training and development","Edu 7":"Deals for students and parents","Edu 8":"Azure for students","Business tab":"Business","Bus 1":"Microsoft Cloud","Bus 2":"Microsoft Security","Bus 3":"Dynamics 365","Bus 4":"Microsoft 365","Bus 5":"Microsoft Power Platform","Bus 6":"Microsoft Teams","Bus 7":"Microsoft Industry","Bus 8":"Small Business","Developer tab":"Developer & IT","Dev 1":"Azure","Dev 2":"Developer Center","Dev 3":"Documentation","Dev 4":"Microsoft Learn","Dev 5":"Microsoft Tech Community","Dev 6":"Azure Marketplace","Dev 7":"AppSource","Dev 8":"Visual Studio","Company tab":"Company","Com 1":"Careers","Com 2":"About Microsoft","Com 3":"Company News","Com 4":"Privacy at Microsoft","Com 5":"Investors","Com 6":"Diversity and inclusion","Com 7":"Accessiblity","Com 8":"Sustainibility"},"defaults":{"config":{"applicablePages":[],"description":"The Microsoft Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.MicrosoftFooter","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":"The Microsoft Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_MicrosoftFooter_context-uhf_105bp_1 {\n min-width: 17.5rem;\n font-size: 0.9375rem;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & *:after {\n box-sizing: inherit;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-link_105bp_12 {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n 
&a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff_105bp_12 {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.custom_widget_MicrosoftFooter_c-uhff-nav_105bp_35 {\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n .custom_widget_MicrosoftFooter_c-heading-4_105bp_49 {\n color: #616161;\n word-break: break-word;\n font-size: 0.9375rem;\n line-height: 1.25rem;\n padding: 2.25rem 0 0.25rem;\n font-weight: 600;\n }\n .custom_widget_MicrosoftFooter_c-uhff-nav-row_105bp_57 {\n .custom_widget_MicrosoftFooter_c-uhff-nav-group_105bp_58 {\n display: block;\n float: left;\n min-height: 0.0625rem;\n vertical-align: text-top;\n padding: 0 0.75rem;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.custom_widget_MicrosoftFooter_c-list_105bp_78.custom_widget_MicrosoftFooter_f-bare_105bp_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 0.5rem 0;\n margin: 0;\n }\n }\n }\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff-base_105bp_94 {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 1.875rem 5% 1rem;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_105bp_107 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: left;\n margin: 0.1875rem 0;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_105bp_107:hover {\n text-decoration: underline;\n }\n ul.custom_widget_MicrosoftFooter_c-list_105bp_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: right;\n margin: 0.1875rem 0;\n color: #616161;\n li {\n padding: 0 1.5rem 0.25rem 0;\n display: inline-block;\n }\n }\n .custom_widget_MicrosoftFooter_c-list_105bp_78.custom_widget_MicrosoftFooter_f-bare_105bp_78 {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 1.875rem 1.5rem 1rem;\n }\n}\n.custom_widget_MicrosoftFooter_social-share_105bp_138 {\n position: fixed;\n top: 60%;\n transform: translateY(-50%);\n left: 0;\n z-index: 1000;\n}\n.custom_widget_MicrosoftFooter_sharing-options_105bp_146 {\n list-style: none;\n padding: 0;\n margin: 0;\n display: block;\n flex-direction: column;\n background-color: white;\n width: 2.6875rem;\n border-radius: 0 0.4375rem 0.4375rem 0;\n}\n.custom_widget_MicrosoftFooter_linkedin-icon_105bp_156 {\n border-top-right-radius: 7px;\n}\n.custom_widget_MicrosoftFooter_linkedin-icon_105bp_156:hover {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162 {\n border-bottom-right-radius: 7px;\n}\n.custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162:hover {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169 {\n position: relative;\n display: block;\n 
margin: -0.125rem 0;\n transition: all 0.2s ease;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169:hover .custom_widget_MicrosoftFooter_linkedin-icon_105bp_156 {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169:hover .custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162 {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169 img {\n width: 2.5rem;\n height: auto;\n transition: filter 0.3s ease;\n}\n.custom_widget_MicrosoftFooter_social-share-list_105bp_188 {\n width: 2.5rem;\n}\n.custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162 {\n width: 2.5rem;\n}\n.custom_widget_MicrosoftFooter_share-icon_105bp_195 {\n border: 2px solid transparent;\n display: inline-block;\n position: relative;\n}\n.custom_widget_MicrosoftFooter_share-icon_105bp_195:hover {\n opacity: 1;\n border: 2px solid white;\n box-sizing: border-box;\n}\n.custom_widget_MicrosoftFooter_share-icon_105bp_195:hover .custom_widget_MicrosoftFooter_label_105bp_207 {\n opacity: 1;\n visibility: visible;\n border: 2px solid white;\n box-sizing: border-box;\n border-left: none;\n}\n.custom_widget_MicrosoftFooter_label_105bp_207 {\n position: absolute;\n left: 100%;\n white-space: nowrap;\n opacity: 0;\n visibility: hidden;\n transition: all 0.2s ease;\n color: white;\n border-radius: 0 10 0 0.625rem;\n top: 50%;\n transform: translateY(-50%);\n height: 2.5rem;\n border-radius: 0 0.375rem 0.375rem 0;\n display: flex;\n align-items: center;\n justify-content: center;\n padding: 1.25rem 0.3125rem 1.25rem 0.5rem;\n margin-left: -0.0625rem;\n}\n.custom_widget_MicrosoftFooter_linkedin_105bp_156 {\n background-color: #0474b4;\n}\n.custom_widget_MicrosoftFooter_facebook_105bp_237 {\n background-color: #3c5c9c;\n}\n.custom_widget_MicrosoftFooter_twitter_105bp_240 {\n background-color: white;\n color: black;\n}\n.custom_widget_MicrosoftFooter_reddit_105bp_244 {\n background-color: #fc4404;\n}\n.custom_widget_MicrosoftFooter_mail_105bp_247 {\n background-color: #848484;\n}\n.custom_widget_MicrosoftFooter_bluesky_105bp_250 {\n background-color: white;\n color: black;\n}\n.custom_widget_MicrosoftFooter_rss_105bp_254 {\n background-color: #ec7b1c;\n}\n#custom_widget_MicrosoftFooter_RSS_105bp_1 {\n width: 2.5rem;\n height: 2.5rem;\n}\n@media (max-width: 991px) {\n .custom_widget_MicrosoftFooter_social-share_105bp_138 {\n display: none;\n 
}\n}\n","tokens":{"context-uhf":"custom_widget_MicrosoftFooter_context-uhf_105bp_1","c-uhff-link":"custom_widget_MicrosoftFooter_c-uhff-link_105bp_12","c-uhff":"custom_widget_MicrosoftFooter_c-uhff_105bp_12","c-uhff-nav":"custom_widget_MicrosoftFooter_c-uhff-nav_105bp_35","c-heading-4":"custom_widget_MicrosoftFooter_c-heading-4_105bp_49","c-uhff-nav-row":"custom_widget_MicrosoftFooter_c-uhff-nav-row_105bp_57","c-uhff-nav-group":"custom_widget_MicrosoftFooter_c-uhff-nav-group_105bp_58","c-list":"custom_widget_MicrosoftFooter_c-list_105bp_78","f-bare":"custom_widget_MicrosoftFooter_f-bare_105bp_78","c-uhff-base":"custom_widget_MicrosoftFooter_c-uhff-base_105bp_94","c-uhff-ccpa":"custom_widget_MicrosoftFooter_c-uhff-ccpa_105bp_107","social-share":"custom_widget_MicrosoftFooter_social-share_105bp_138","sharing-options":"custom_widget_MicrosoftFooter_sharing-options_105bp_146","linkedin-icon":"custom_widget_MicrosoftFooter_linkedin-icon_105bp_156","social-share-rss-image":"custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162","social-link-footer":"custom_widget_MicrosoftFooter_social-link-footer_105bp_169","social-share-list":"custom_widget_MicrosoftFooter_social-share-list_105bp_188","share-icon":"custom_widget_MicrosoftFooter_share-icon_105bp_195","label":"custom_widget_MicrosoftFooter_label_105bp_207","linkedin":"custom_widget_MicrosoftFooter_linkedin_105bp_156","facebook":"custom_widget_MicrosoftFooter_facebook_105bp_237","twitter":"custom_widget_MicrosoftFooter_twitter_105bp_240","reddit":"custom_widget_MicrosoftFooter_reddit_105bp_244","mail":"custom_widget_MicrosoftFooter_mail_105bp_247","bluesky":"custom_widget_MicrosoftFooter_bluesky_105bp_250","rss":"custom_widget_MicrosoftFooter_rss_105bp_254","RSS":"custom_widget_MicrosoftFooter_RSS_105bp_1"}},"form":null},"localOverride":false},"CachedAsset:text:en_US-components/community/Breadcrumb-1745505307000":{"__typename":"CachedAsset","id":"text:en_US-components/community/Breadcrumb-1745505307000","value":{"navLabel":"Breadcrumbs","dropdown":"Additional parent page navigation"},"localOverride":false},"CachedAsset:text:en_US-components/tags/TagsHeaderWidget-1745505307000":{"__typename":"CachedAsset","id":"text:en_US-components/tags/TagsHeaderWidget-1745505307000","value":{"tag":"{tagName}","topicsCount":"{count} {count, plural, one {Topic} other {Topics}}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageListForNodeByRecentActivityWidget-1745505307000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageListForNodeByRecentActivityWidget-1745505307000","value":{"title@userScope:other":"Recent Content","title@userScope:self":"Contributions","title@board:FORUM@userScope:other":"Recent Discussions","title@board:BLOG@userScope:other":"Recent Blogs","emptyDescription":"No content to show","MessageListForNodeByRecentActivityWidgetEditor.nodeScope.label":"Scope","title@instance:1722894000155":"Recent Discussions","title@instance:1727367112619":"Recent Blog Articles","title@instance:1727367069748":"Recent Discussions","title@instance:1727366213114":"Latest Discussions","title@instance:1727899609720":"","title@instance:1727363308925":"Latest Discussions","title@instance:1737115580352":"Latest Articles","title@instance:1720453418992":"Recent Discssions","title@instance:1727365950181":"Latest Blog Articles","title@instance:bmDPnI":"Latest Blog Articles","title@instance:IiDDJZ":"Latest Blog Articles","title@instance:1721244347979":"Latest blog posts","title@instance:1728383752171":"Related 
Content","title@instance:1722893956545":"Latest Skilling Resources","title@instance:dhcgCU":"Latest Discussions"},"localOverride":false},"Category:category:Exchange":{"__typename":"Category","id":"category:Exchange","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Outlook":{"__typename":"Category","id":"category:Outlook","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Community-Info-Center":{"__typename":"Category","id":"category:Community-Info-Center","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:EducationSector":{"__typename":"Category","id":"category:EducationSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:DrivingAdoption":{"__typename":"Category","id":"category:DrivingAdoption","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Azure":{"__typename":"Category","id":"category:Azure","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows-Server":{"__typename":"Category","id":"category:Windows-Server","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftTeams":{"__typename":"Category","id":"category:MicrosoftTeams","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:PublicSector":{"__typename":"Category","id":"category:PublicSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoft365":{"__typename":"Category","id":"category:microsoft365","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:IoT":{"__typename":"Category","id":"category:IoT","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:HealthcareAndLifeSciences":{"__typename":"Category","id":"category:HealthcareAndLifeSciences","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:ITOpsTalk":{"__typename":"Category","id":"category:ITOpsTalk","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftLearn":{"__typename":"Category","id":"category:MicrosoftLearn","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Blog:board:MicrosoftLearnBlog":{"__typename":"Blog","id":"board:MicrosoftLearnBlog","blogPolicies":{"__typename":"BlogPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}},"boardPolicies":{"__typename":"BoardPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:AI":{"__typename":"Category","id":"category:AI","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Microsoft
Mechanics":{"__typename":"Category","id":"category:MicrosoftMechanics","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftforNonprofits":{"__typename":"Category","id":"category:MicrosoftforNonprofits","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:StartupsatMicrosoft":{"__typename":"Category","id":"category:StartupsatMicrosoft","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:PartnerCommunity":{"__typename":"Category","id":"category:PartnerCommunity","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Microsoft365Copilot":{"__typename":"Category","id":"category:Microsoft365Copilot","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows":{"__typename":"Category","id":"category:Windows","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Content_Management":{"__typename":"Category","id":"category:Content_Management","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoft-security":{"__typename":"Category","id":"category:microsoft-security","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoftintune":{"__typename":"Category","id":"category:microsoftintune","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Conversation:conversation:4081775":{"__typename":"Conversation","id":"conversation:4081775","topic":{"__typename":"BlogTopicMessage","uid":4081775},"lastPostingActivityTime":"2025-02-14T01:33:16.508-08:00","solved":false},"User:user:988334":{"__typename":"User","uid":988334,"login":"paolosalvatori","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/dS05ODgzMzQtMzg1MjYyaTE4QTU5MkIyQUVCMkM0MDE"},"id":"user:988334"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDgxNzc1LTU2MDI0MGk4QTVBRENFMDBGOUM0QUMz?revision=4\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDgxNzc1LTU2MDI0MGk4QTVBRENFMDBGOUM0QUMz?revision=4","title":"architecture.png","associationType":"TEASER","width":1114,"height":713,"altText":"architecture.png"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDgxNzc1LTU2MDI0M2k3RUU0Q0Q0MTZERDRERTlD?revision=4\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDgxNzc1LTU2MDI0M2k3RUU0Q0Q0MTZERDRERTlD?revision=4","title":"architecture.png","associationType":"BODY","width":1114,"height":713,"altText":"architecture.png"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDgxNzc1LTU2MDI0NGlDMEQwMzdCMEJFOTY5NzYy?revision=4\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDgxNzc1LTU2MDI0NG
End-to-end TLS with AKS, Azure Front Door, Azure Private Link Service, and NGINX Ingress Controller

This article shows how Azure Front Door Premium can be configured to use a Private Link Service to expose an AKS-hosted workload via the NGINX Ingress Controller, with the ingress controller configured to use a private IP address on the internal load balancer.

To ensure your security and compliance requirements are met, Azure Front Door offers comprehensive end-to-end TLS encryption. For more information, see End-to-end TLS with Azure Front Door support. With Front Door's TLS/SSL offload capability, the TLS connection is terminated and the incoming traffic is decrypted at Front Door. The traffic is then re-encrypted before being forwarded to the origin, which in this project is a web application hosted in an Azure Kubernetes Service (AKS) cluster. The sample application is exposed via a managed or unmanaged NGINX Ingress Controller:

- Managed: a managed NGINX ingress controller is deployed using the application routing add-on for AKS. The deployment script configures the managed NGINX ingress controller to use a private IP address as a frontend IP configuration of the kubernetes-internal internal load balancer. For more information, see Configure NGINX ingress controller to support Azure private DNS zone with application routing add-on.
- Unmanaged: an unmanaged NGINX ingress controller is deployed via Helm. The deployment script configures the unmanaged NGINX ingress controller to use a private IP address as a frontend IP configuration of the kubernetes-internal internal load balancer. For more information, see Create an ingress controller using an internal IP address.

To enhance security, HTTPS is configured as the forwarding protocol on Azure Front Door when connecting to the AKS-hosted workload configured as an origin. This practice ensures that end-to-end TLS encryption is enforced for the entire request, from the client to the origin.

Azure Front Door Premium can connect to a backend application via Azure Private Link Service (PLS). For more information, see Secure your Origin with Private Link in Azure Front Door Premium. If you deploy a private origin using Azure Front Door Premium and the Azure Private Link Service (PLS), TLS/SSL offload is fully supported.
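For the unmanaged option, the private frontend IP is typically requested through the ingress-nginx Helm chart's service values. The following is a minimal sketch of such a values file rather than the exact configuration used by the deployment script; the file name, replica count, and the 10.240.0.45 address are placeholders that would have to come from your own cluster subnet:

```yaml
# internal-ingress-values.yaml (hypothetical file name)
controller:
  replicaCount: 3
  service:
    annotations:
      # Place the service frontend on the AKS internal load balancer (kubernetes-internal)
      service.beta.kubernetes.io/azure-load-balancer-internal: "true"
    # Pin the frontend to a private IP address from the cluster subnet (placeholder value)
    loadBalancerIP: 10.240.0.45
```

Passing a values file along these lines to helm install for the ingress-nginx chart produces the private frontend IP configuration on the kubernetes-internal load balancer that the Azure Private Link Service references later in this article.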
This article demonstrates how to set up end-to-end TLS encryption using Azure Front Door Premium and Azure Kubernetes Service (AKS). In addition, it shows how to use Azure Front Door Premium, Azure Web Application Firewall, and Azure Private Link Service (PLS) to securely expose and protect a workload running in Azure Kubernetes Service (AKS). The sample application is exposed via the NGINX Ingress Controller configured to use a private IP address as a frontend IP configuration of the kubernetes-internal internal load balancer. For more information, see Create an ingress controller using an internal IP address.

This sample also shows how to deploy an Azure Kubernetes Service cluster with API Server VNET Integration and how to use an Azure NAT Gateway to manage outbound connections initiated by AKS-hosted workloads. AKS clusters with API Server VNET Integration provide a series of advantages; for example, they can have public network access or private cluster mode enabled or disabled without redeploying the cluster. For more information, see Create an Azure Kubernetes Service cluster with API Server VNet Integration.

If it is not necessary to implement end-to-end TLS and the Front Door route can be set up to use HTTP instead of HTTPS when calling the downstream AKS-hosted workload, refer to How to expose NGINX Ingress Controller via Azure Front Door and Azure Private Link Service. You can find the companion code for this article in this GitHub repository.

Prerequisites

- An active Azure subscription. If you don't have one, create a free Azure account before you begin.
- Visual Studio Code installed on one of the supported platforms along with the Bicep extension.
- Azure CLI version 2.58.0 or later. To install or upgrade, see Install Azure CLI.
- An existing Azure Key Vault resource with a valid TLS certificate for the sample web application.
- An existing Azure DNS zone for the name resolution of the Azure Front Door custom domain via a CNAME record.

Architecture

This sample provides a set of Bicep modules to deploy and configure an Azure Front Door Premium with a WAF policy as a global load balancer in front of a public or a private AKS cluster with API Server VNET Integration. You can either configure your AKS cluster to use Azure CNI with Dynamic IP Allocation or Azure CNI Overlay networking. In addition, the deployment configures the AKS cluster with the Azure Key Vault provider for Secrets Store CSI Driver, which allows for the integration of an Azure Key Vault as a secret store with an Azure Kubernetes Service (AKS) cluster via a CSI volume.

The following diagram shows the architecture and network topology deployed by the project when the AKS cluster is configured to use Azure CNI with Dynamic IP Allocation:

(Diagram: architecture.png)

A Deployment Script is used to optionally install an unmanaged instance of the NGINX Ingress Controller, configured to use a private IP address as a frontend IP configuration of the kubernetes-internal internal load balancer, via Helm, along with a sample httpbin web application via YAML manifests. The script defines a SecretProviderClass to read the TLS certificate from the source Azure Key Vault and creates a Kubernetes secret. The deployment and ingress objects are configured to use the certificate contained in the Kubernetes secret.
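To illustrate that last step, a SecretProviderClass along the following lines reads the certificate from Key Vault and mirrors it into a kubernetes.io/tls secret. This is a minimal sketch: the resource name, namespace, vault name, certificate name, tenant ID, and managed identity client ID are placeholders, not the values produced by the Bicep modules.

```yaml
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
  name: azure-tls-provider          # placeholder name
  namespace: httpbin                # placeholder namespace
spec:
  provider: azure
  # Mirror the mounted certificate into a Kubernetes TLS secret for the ingress to reference
  secretObjects:
    - secretName: ingress-tls-csi   # the secret name the ingress object points at
      type: kubernetes.io/tls
      data:
        - objectName: sample-cert
          key: tls.key
        - objectName: sample-cert
          key: tls.crt
  parameters:
    usePodIdentity: "false"
    useVMManagedIdentity: "true"
    userAssignedIdentityID: "<client-id-of-csi-driver-identity>"
    keyvaultName: "<key-vault-name>"
    tenantId: "<entra-tenant-id>"
    objects: |
      array:
        - |
          objectName: sample-cert
          objectType: secret        # fetches the certificate together with its private key
```

Note that the secret defined under secretObjects is only materialized once a pod mounts the corresponding CSI volume; this is a behavior of the Secrets Store CSI Driver itself rather than something specific to this sample.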
The Origin child resource of the Azure Front Door Premium global load balancer is configured to call the sample application using the HTTPS forwarding protocol via the Azure Private Link Service, the AKS kubernetes-internal internal load balancer, and the NGINX Ingress Controller.

The Bicep modules are parametric, so you can choose any network plugin:

- Azure CNI with static IP allocation
- Azure CNI with dynamic IP allocation
- Azure CNI Powered by Cilium
- Azure CNI Overlay
- BYO CNI
- Kubenet

NOTE: The sample was tested only with Azure CNI and Azure CNI Overlay.

In addition, the project shows how to deploy an Azure Kubernetes Service cluster with the following extensions and features:

- Istio-based service mesh add-on for Azure Kubernetes Service provides an officially supported and tested Istio integration for Azure Kubernetes Service (AKS).
- API Server VNET Integration allows you to enable network communication between the API server and the cluster nodes without requiring a private link or tunnel. AKS clusters with API Server VNET Integration provide a series of advantages; for example, they can have public network access or private cluster mode enabled or disabled without redeploying the cluster. For more information, see Create an Azure Kubernetes Service cluster with API Server VNet Integration.
- Azure NAT Gateway to manage outbound connections initiated by AKS-hosted workloads.
- Event-driven Autoscaling (KEDA) add-on is a single-purpose and lightweight component that strives to make application autoscaling simple and is a CNCF Incubation project.
- Dapr extension for Azure Kubernetes Service (AKS) allows you to install Dapr, a portable, event-driven runtime that simplifies building resilient, stateless, and stateful applications that run on the cloud and edge and embrace the diversity of languages and developer frameworks. With its sidecar architecture, Dapr helps you tackle the challenges that come with building microservices and keeps your code platform agnostic.
- Flux V2 extension allows you to deploy workloads to an Azure Kubernetes Service (AKS) cluster via GitOps. For more information, see GitOps Flux v2 configurations with AKS and Azure Arc-enabled Kubernetes.
- Vertical Pod Autoscaling allows you to automatically set resource requests and limits on containers per workload based on past usage. VPA ensures pods are scheduled onto nodes that have the required CPU and memory resources. For more information, see Kubernetes Vertical Pod Autoscaling.
- Azure Key Vault Provider for Secrets Store CSI Driver provides a variety of methods of identity-based access to your Azure Key Vault.
- Image Cleaner to clean up stale images on your Azure Kubernetes Service cluster.
- Azure Kubernetes Service (AKS) Network Observability is an important part of maintaining a healthy and performant Kubernetes cluster. By collecting and analyzing data about network traffic, you can gain insights into how your cluster is operating and identify potential problems before they cause outages or performance degradation.
- Managed NGINX ingress with the application routing add-on.

In a production environment, we strongly recommend deploying a private AKS cluster with Uptime SLA. For more information, see private AKS cluster with a Public DNS address. Alternatively, you can deploy a public AKS cluster and secure access to the API server using authorized IP address ranges.
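For orientation before the resource inventory below, the sample workload itself is just an ordinary Deployment and ClusterIP Service installed from YAML manifests. The following is a minimal sketch; the namespace, labels, replica count, and the kennethreitz/httpbin image are assumptions, not necessarily the exact manifests shipped in the companion repository.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpbin
  namespace: httpbin          # placeholder namespace
spec:
  replicas: 3
  selector:
    matchLabels:
      app: httpbin
  template:
    metadata:
      labels:
        app: httpbin
    spec:
      containers:
        - name: httpbin
          image: docker.io/kennethreitz/httpbin   # assumed sample image
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: httpbin
  namespace: httpbin
spec:
  type: ClusterIP
  selector:
    app: httpbin
  ports:
    - port: 80
      targetPort: 80
```

The ingress object discussed earlier then routes traffic for the custom domain to this Service.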
The Bicep modules deploy or use the following Azure resources:

- Microsoft.ContainerService/managedClusters: a public or private Azure Kubernetes Service (AKS) cluster composed of the following node pools:
  - A system node pool in a dedicated subnet. The default node pool hosts only critical system pods and services. The worker nodes have a node taint that prevents application pods from being scheduled on this node pool.
  - A user node pool hosting user workloads and artifacts in a dedicated subnet.
- Microsoft.Authorization/roleDefinitions: the Bicep modules create the following role assignments:
  - A Grafana Admin role assignment on the Azure Managed Grafana instance for the Microsoft Entra ID user whose objectID is defined in the userId parameter. The Grafana Admin role provides full control of the instance, including managing role assignments and viewing, editing, and configuring data sources. For more information, see How to share access to Azure Managed Grafana.
  - A Key Vault Administrator role assignment on the existing Azure Key Vault resource that contains the TLS certificate, granted to the user-defined managed identity used by the Azure Key Vault provider for Secrets Store CSI Driver. This assignment is necessary to let the CSI driver read the certificate from the source Key Vault.
- Microsoft.Cdn/profiles: an Azure Front Door Premium resource used to expose the AKS-hosted sample application via Azure Private Link Service and the NGINX Ingress Controller. The Bicep module creates the following child resources to expose the workload:
  - Microsoft.Cdn/profiles/originGroups: an Origin Group in Azure Front Door refers to a set of Origins that receives similar traffic for their application. You can define the Origin Group as a logical grouping of your application instances across the world that receive the same traffic and respond with an expected behavior. These Origins can be deployed across different regions or within the same region, in an Active/Active or Active/Passive configuration.
  - Microsoft.Cdn/profiles/originGroups/origins: an Origin refers to the application deployment exposed via Azure Front Door. An Origin defines properties of the underlying backend application such as its type, weight, priority, host header, and more. In this sample, the Origin is configured to call the httpbin web application via an Azure Private Link Service.
  - Microsoft.Cdn/profiles/afdEndpoints: in Azure Front Door Standard/Premium, an endpoint is a logical grouping of one or more routes that are associated with domain names. Each endpoint is assigned a domain name by Front Door, and you can associate your own custom domains by using routes.
  - Microsoft.Cdn/profiles/secrets: this resource is used to store and manage the TLS certificate from Azure Key Vault. This certificate is used by the custom domain.
  - Microsoft.Cdn/profiles/customDomains: this resource allows you to configure and manage a custom domain name for the Front Door endpoint. The custom domain is configured to use the Front Door secret that contains the TLS certificate.
  - Microsoft.Cdn/profiles/securityPolicies: a security policy associates a WAF policy with a list of domains and paths. For more information, see Security and Azure Front Door.
- Microsoft.Network/FrontDoorWebApplicationFirewallPolicies: Azure Web Application Firewall (WAF) on Azure Front Door provides centralized protection for your web applications.
WAF defends your web services against common exploits and vulnerabilities, keeps your service highly available for your users, and helps you meet compliance requirements. You can configure a WAF policy and associate it with one or more Front Door front-ends for protection. The WAF policy deployed by this sample consists of three types of security rules:
  - Custom rules are used to block incoming requests based on the content of the payload, the query string, the HTTP request method, the IP address of the caller, and more. This sample adds a couple of custom rules to block calls coming from a given IP range or calls that contain the word blockme in the query string.
  - OWASP Azure-managed rule sets provide an easy way to deploy protection against a common set of security threats such as SQL injection or cross-site scripting.
  - Bot protection rule set can be used to take custom actions on requests from known bot categories.
- Microsoft.Network/dnsZones: this resource references an existing Azure DNS zone used for the name resolution of the Azure Front Door custom domain. You can use Azure DNS to host your DNS domain and manage your DNS records.
  - Microsoft.Network/dnsZones/CNAME: this CNAME record creates an alias or pointer from one domain name to another. With this resource, you can configure a CNAME record to redirect DNS queries for the custom domain to the original hostname of the Azure Front Door endpoint.
  - Microsoft.Network/dnsZones/TXT: this resource represents a Text (TXT) record within a DNS zone. A TXT record allows you to store arbitrary text information associated with a domain. In this project, the TXT record contains the validation token for the custom domain.
- Microsoft.Network/privateLinkServices: an Azure Private Link Service is configured to reference the kubernetes-internal internal load balancer of the AKS cluster.
- Microsoft.Cdn/profiles/afdEndpoints/routes: a route defines properties such as custom domains, HTTP redirect, supported protocols, and origin path that specify how to invoke the backend application. For more information, see Routing architecture overview.
- Microsoft.Network/virtualNetworks: a new virtual network with six subnets:
  - SystemSubnet: used for the agent nodes of the system node pool.
  - UserSubnet: used for the agent nodes of the user node pool.
  - PodSubnet: used to allocate private IP addresses to pods dynamically.
  - ApiServerSubnet: API Server VNET Integration projects the API server endpoint directly into this delegated subnet in the virtual network where the AKS cluster is deployed.
  - AzureBastionSubnet: a subnet for the Azure Bastion host.
  - VmSubnet: a subnet for a jump-box virtual machine used to connect to the (private) AKS cluster and for the private endpoints.
- Microsoft.ManagedIdentity/userAssignedIdentities: a user-defined managed identity used by the AKS cluster to create additional resources such as load balancers and managed disks in Azure.
- Microsoft.Compute/virtualMachines: the Bicep modules create a jump-box virtual machine to manage the private AKS cluster.
- Microsoft.Network/bastionHosts: a separate Azure Bastion is deployed in the AKS cluster virtual network to provide SSH connectivity to both agent nodes and virtual machines.
- Microsoft.Storage/storageAccounts: this storage account is used to store the boot diagnostics logs of both the service provider and service consumer virtual machines. Boot Diagnostics is a debugging feature that allows you to view console output and screenshots to diagnose virtual machine status.
- Microsoft.ContainerRegistry/registries: an Azure Container Registry (ACR) to build, store, and manage container images and artifacts in a private registry for all container deployments.
- Microsoft.KeyVault/vaults: an existing Azure Key Vault used to store secrets, certificates, and keys that can be mounted as files by pods using the Azure Key Vault Provider for Secrets Store CSI Driver. For more information, see Use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster and Provide an identity to access the Azure Key Vault Provider for Secrets Store CSI Driver. In this project, the existing Key Vault resource contains the TLS certificate used by the ingress Kubernetes object and by the custom domain of the Azure Front Door endpoint.
- Microsoft.Network/privateEndpoints: an Azure Private Endpoint is created for each of the following resources:
  - Azure Container Registry
  - Azure Key Vault
  - Azure Storage Account
  - API Server, when deploying a private AKS cluster.
- Microsoft.Network/privateDnsZones: an Azure Private DNS Zone is created for each of the following resources:
  - Azure Container Registry
  - Azure Key Vault
  - Azure Storage Account
  - API Server, when deploying a private AKS cluster.
- Microsoft.Network/networkSecurityGroups: subnets hosting virtual machines and Azure Bastion hosts are protected by Azure Network Security Groups that are used to filter inbound and outbound traffic.
- Microsoft.Monitor/accounts: an Azure Monitor workspace is a unique environment for data collected by Azure Monitor. Each workspace has its own data repository, configuration, and permissions. Log Analytics workspaces contain logs and metrics data from multiple Azure resources, whereas Azure Monitor workspaces currently contain only metrics related to Prometheus. Azure Monitor managed service for Prometheus allows you to collect and analyze metrics at scale using a Prometheus-compatible monitoring solution based on Prometheus. This fully managed service allows you to use the Prometheus query language (PromQL) to analyze and alert on the performance of monitored infrastructure and workloads without having to operate the underlying infrastructure. The primary method for visualizing Prometheus metrics is Azure Managed Grafana. You can connect your Azure Monitor workspace to an Azure Managed Grafana instance to visualize Prometheus metrics using a set of built-in and custom Grafana dashboards.
- Microsoft.Dashboard/grafana: an Azure Managed Grafana instance used to visualize the Prometheus metrics generated by the Azure Kubernetes Service (AKS) cluster deployed by the Bicep modules. Azure Managed Grafana is a fully managed service for analytics and monitoring solutions. It's supported by Grafana Enterprise, which provides extensible data visualizations.
This managed service allows to quickly and easily deploy Grafana dashboards with built-in high availability and control access with Azure security. \n Microsoft.OperationalInsights/workspaces: a centralized Azure Log Analytics workspace is used to collect the diagnostics logs and metrics from all the Azure resources:\n \n Azure Kubernetes Service cluster \n Azure Key Vault \n Azure Network Security Group \n Azure Container Registry \n Azure Storage Account \n \n \n Microsoft.Resources/deploymentScripts: a deployment script is used to run the install-front-door-end-to-end-tls.sh Bash script which installs the httpbin web application via YAML templates and the following packages to the AKS cluster via Helm. For more information on deployment scripts, see Use deployment scripts in Bicep\n \n (Optional) NGINX ingress controller via Helm if you opted to use an unmanaged NGINX ingress controller. \n (Optional) Cert-manager \n (Optional) Prometheus and Grafana \n \n \n \n \n NOTE AKS nodes can be referenced in the load balancer backend pools by either their IP configuration (Azure Virtual Machine Scale Sets based membership) or by their IP address only. Utilizing the IP address based backend pool membership provides higher efficiencies when updating services and provisioning load balancers, especially at high node counts. Provisioning new clusters with IP based backend pools and converting existing clusters is now supported. When combined with NAT Gateway or user-defined routing egress types, provisioning of new nodes and services are more performant. Two different pool membership types are available: \n \n nodeIPConfiguration : legacy Virtual Machine Scale Sets IP configuration based pool membership type \n nodeIP : IP-based membership type \n \n Azure Private Link Service does not support Azure Load balancers configured to use with backend addresses set by (virtualNetwork, ipAddress) or (subnet, ipAddress). Hence,  nodeIP  backend pool type is not currently supported if you want to create Azure Private Link Service based on an AKS load balancer. For this reason, this project adopts the  nodeIPConfiguration  membership type for the backend pools. \n \n \n NOTE At the end of the deployment, the  deploy.sh  performs additional steps to approve the Azure Private Link Service connection from Azure Front Door. For more information, see Secure your Origin with Private Link in Azure Front Door Premium. If you don't use the  deploy.sh  script to deploy the Bicep modules, you must approve the private endpoint connection before traffic can pass to the origin privately. You can approve private endpoint connections by using the Azure portal, Azure CLI, or Azure PowerShell. For more information, see Manage a Private Endpoint connection. \n \n \n NOTE You can find the  architecture.vsdx  file used for the diagram under the  visio  folder. \n \n \n Message Flow \n \n The following diagram illustrates the steps involved in the message flow during deployment and runtime. \n \n   \n \n Deployment Time \n \n The deployment time steps are as follows: \n \n A security engineer generates a certificate for the custom domain used by the workload and saves it in an Azure Key Vault. 
You can obtain a valid certificate from a well-known certification authority (CA), or use a solution like Key Vault Acmebot to acquire a certificate from one of the following ACME v2 compliant Certification Authority:\n \n Let's Encrypt \n Buypass Go SSL \n ZeroSSL (Requires EAB Credentials) \n Google Trust Services (Requires EAB Credentials) \n SSL.com (Requires EAB Credentials) \n Entrust (Requires EAB Credentials) \n \n \n A platform engineer specifies the necessary information in the  main.bicepparams  Bicep parameters file and deploys the Bicep modules to create the Azure resources. This includes:\n \n A prefix for the Azure resources \n The name and resource group of the existing Azure Key Vault that holds the TLS certificate for the workload hostname and Front Door custom domain. \n The name of the certificate in the Key Vault. \n The name and resource group of the DNS zone used for resolving the Front Door custom domain. \n \n \n The Deployment Script creates the following objects in the AKS cluster:\n \n A Kubernetes deployment and service for the sample httpbin web application. \n A Kubernetes ingress object to expose the web application via the NGINX ingress controller. \n A SecretProviderClass custom resource that retrieves the TLS certificate from the specified Azure Key Vault by using the user-defined managed identity of the Azure Key Vault provider for Secrets Store CSI Driver. This component creates a Kubernetes secret containing the TLS certificate referenced by the ingress object. \n (Optional) NGINX ingress controller via Helm if you opted to use an unmanaged NGINX ingress controller. \n (Optional) Cert-manager \n (Optional) Prometheus and Grafana \n \n \n A Front Door secret resource is used to manage and store the TLS certificate from the Azure Key Vault. This certificate is used by the custom domain associated with the Azure Front Door endpoint. \n \n \n Runtime \n \n During runtime, the message flow for a request initiated by an external client application is as follows: \n \n The client application sends a request to the web application using its custom domain. The DNS zone associated with the custom domain uses a CNAME record to redirect the DNS query for the custom domain to the original hostname of the Azure Front Door endpoint. \n The request is sent to one of the Azure Front Door points-of-presence. \n Azure Front Door forwards the incoming request to the Azure Private Endpoint connected to the Azure Private Link Service used to expose the AKS-hosted workload. \n The request is sent to the Azure Private Link Service. \n The request is forwarded to the  kubernetes-internal  AKS internal load balancer. \n The request is sent to one of the agent nodes hosting a pod of the NGINX Ingress Controller. \n The request is handled by one of the NGINX Ingress Controller replicas \n The NGINX Ingress Controller forwards the request to one of the workload pods. \n \n \n End-to-End TLS in Azure Front Door \n \n Azure Front Door supports end-to-end TLS encryption to meet security and compliance requirements. TLS/SSL offload is employed, where the TLS connection is terminated at Azure Front Door, decrypting the traffic and re-encrypting it before forwarding it to the origin. When using the origin's public IP address, configuring HTTPS as the forwarding protocol is recommended for enhanced security. This ensures enforcement of end-to-end TLS encryption throughout the request processing from client to origin. 
Additionally, TLS/SSL offload is supported when deploying a private origin with Azure Front Door Premium via the Azure Private Link Service (PLS) feature. For more information, see End-to-end TLS with Azure Front Door. \n \n Custom Domains in Azure Front Door and their Advantages \n \n When configuring custom domains in Azure Front Door, you have two options: using a custom domain equal to the original hostname of the workload or using a custom domain that differs from the original hostname. Using a custom domain equal to the original hostname provides the following advantages: \n \n Simplified configuration without additional DNS management. \n Maintenance of search engine optimization (SEO) benefits and branding consistency. \n Hostname and custom domain consistency across Front Door and the downstream workload. \n Need for a single certificate across the Azure Front Door resource and the workload. \n \n \n Origin TLS Connection and Frontend TLS Connection \n \n For HTTPS connections in Azure Front Door, the origin must present a certificate from a valid CA, with a subject name matching the origin hostname. Front Door refuses the connection if the presented certificate lacks the appropriate subject name, resulting in an error for the client. Frontend TLS connections from the client to Azure Front Door can be enabled with a certificate managed by Azure Front Door or by using your own certificate. \n \n Certificate Autorotation \n \n Azure Front Door provides certificate autorotation for managed certificates. Managed certificates are automatically rotated within 90 days of expiry for Azure Front Door managed certificates and within 45 days for Azure Front Door Standard/Premium managed certificates. For custom TLS/SSL certificates, autorotation occurs within 3-4 days when a newer version is available in the key vault. It's possible to manually select a specific version for custom certificates, but autorotation is not supported in that case. The service principal for Front Door must have access to the key vault containing the certificate. The certificate rollout operation by Azure Front Door doesn't cause any downtime, as long as the certificate's subject name or subject alternate name (SAN) remains unchanged. \n \n Deploy the Bicep modules \n \n You can deploy the Bicep modules in the  bicep  folder using the  deploy.sh  Bash script in the same folder. Specify a value for the following parameters in the  deploy.sh  script and  main.parameters.json parameters file before deploying the Bicep modules. \n \n prefix : specifies a prefix for all the Azure resources. \n authenticationType : specifies the type of authentication when accessing the Virtual Machine.  sshPublicKey  is the recommended value. Allowed values:  sshPublicKey  and  password . \n vmAdminUsername : specifies the name of the administrator account of the virtual machine. \n vmAdminPasswordOrKey : specifies the SSH Key or password for the virtual machine. \n aksClusterSshPublicKey : specifies the SSH Key or password for AKS cluster agent nodes. \n aadProfileAdminGroupObjectIDs : when deploying an AKS cluster with Azure AD and Azure RBAC integration, this array parameter contains the list of Azure AD group object IDs that will have the admin role of the cluster. \n subdomain : specifies the subdomain of the workload hostname. Make sure this corresponds to the common name on the TLS certificate. If the hostname is  store.test.com , the subdomain should be  test . 
\n dnsZoneName : specifies name of the Azure DNS Zone, for example  test.com . \n dnsZoneResourceGroupName : specifies the nthe name of the resource group which contains the Azure DNS zone. \n namespace : specifies the namespace of the workload. \n keyVaultName : specifies the name of an existing Key Vault resource holding the TLS certificate. \n keyVaultResourceGroupName : specifies the name of the resource group that contains the existing Key Vault resource. \n keyVaultCertificateName : specifies the name of the existing TLS certificate in Azure Key Vault. \n secretProviderClassName : specifies the name of the  SecretProviderClass . \n secretName : specifies the name of the Kubernetes secret containing the TLS certificate. \n publicDnsZoneName : specifies the name of the public DNS zone used by the managed NGINX Ingress Controller, when enabled. \n publicDnsZoneResourceGroupName : specifies the resource group name of the public DNS zone used by the managed NGINX Ingress Controller, when enabled. \n \n We suggest reading sensitive configuration data such as passwords or SSH keys from a pre-existing Azure Key Vault resource. For more information, see Create parameters files for Bicep deployment. \n \n #!/bin/bash\n\n# Template\ntemplate=\"main.bicep\"\nparameters=\"main.bicepparam\"\n\n# AKS cluster name\nprefix=\"Babo\"\naksName=\"${prefix}Aks\"\nvalidateTemplate=0\nuseWhatIf=0\nupdate=1\ndeploy=1\ninstallExtensions=0\n\n# Name and location of the resource group for the Azure Kubernetes Service (AKS) cluster\nresourceGroupName=\"${prefix}RG\"\nlocation=\"NorthEurope\"\ndeploymentName=\"main\"\n\n# Subscription id, subscription name, and tenant id of the current subscription\nsubscriptionId=$(az account show --query id --output tsv)\nsubscriptionName=$(az account show --query name --output tsv)\ntenantId=$(az account show --query tenantId --output tsv)\n\n# Install aks-preview Azure extension\nif [[ $installExtensions == 1 ]]; then\n echo \"Checking if [aks-preview] extension is already installed...\"\n az extension show --name aks-preview &>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[aks-preview] extension is already installed\"\n\n # Update the extension to make sure you have the latest version installed\n echo \"Updating [aks-preview] extension...\"\n az extension update --name aks-preview &>/dev/null\n else\n echo \"[aks-preview] extension is not installed. Installing...\"\n\n # Install aks-preview extension\n az extension add --name aks-preview 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[aks-preview] extension successfully installed\"\n else\n echo \"Failed to install [aks-preview] extension\"\n exit\n fi\n fi\n\n # Registering AKS feature extensions\n aksExtensions=(\n \"AzureServiceMeshPreview\"\n \"AKS-KedaPreview\"\n \"RunCommandPreview\"\n \"EnableOIDCIssuerPreview\"\n \"EnableWorkloadIdentityPreview\"\n \"EnableImageCleanerPreview\"\n \"AKS-VPAPreview\"\n )\n ok=0\n registeringExtensions=()\n for aksExtension in ${aksExtensions[@]}; do\n echo \"Checking if [$aksExtension] extension is already registered...\"\n extension=$(az feature list -o table --query \"[?contains(name, 'Microsoft.ContainerService/$aksExtension') && @.properties.state == 'Registered'].{Name:name}\" --output tsv)\n if [[ -z $extension ]]; then\n echo \"[$aksExtension] extension is not registered.\"\n echo \"Registering [$aksExtension] extension...\"\n az feature register \\\n --name $aksExtension \\\n --namespace Microsoft.ContainerService \\\n --only-show-errors\n registeringExtensions+=(\"$aksExtension\")\n ok=1\n else\n echo \"[$aksExtension] extension is already registered.\"\n fi\n done\n echo $registeringExtensions\n delay=1\n for aksExtension in ${registeringExtensions[@]}; do\n echo -n \"Checking if [$aksExtension] extension is already registered...\"\n while true; do\n extension=$(az feature list -o table --query \"[?contains(name, 'Microsoft.ContainerService/$aksExtension') && @.properties.state == 'Registered'].{Name:name}\" --output tsv)\n if [[ -z $extension ]]; then\n echo -n \".\"\n sleep $delay\n else\n echo \".\"\n break\n fi\n done\n done\n\n if [[ $ok == 1 ]]; then\n echo \"Refreshing the registration of the Microsoft.ContainerService resource provider...\"\n az provider register \\\n --namespace Microsoft.ContainerService \\\n --only-show-errors\n echo \"Microsoft.ContainerService resource provider registration successfully refreshed\"\n fi\nfi\n\n# Get the last Kubernetes version available in the region\nkubernetesVersion=$(az aks get-versions \\\n --location $location \\\n --query \"values[?isPreview==null].version | sort(@) | [-1]\" \\\n --output tsv \\\n --only-show-errors)\n\nif [[ -n $kubernetesVersion ]]; then\n echo \"Successfully retrieved the last Kubernetes version [$kubernetesVersion] supported by AKS in [$location] Azure region\"\nelse\n echo \"Failed to retrieve the last Kubernetes version supported by AKS in [$location] Azure region\"\n exit\nfi\n\n# Check if the resource group already exists\necho \"Checking if [$resourceGroupName] resource group actually exists in the [$subscriptionName] subscription...\"\n\naz group show \\\n --name $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$resourceGroupName] resource group actually exists in the [$subscriptionName] subscription\"\n echo \"Creating [$resourceGroupName] resource group in the [$subscriptionName] subscription...\"\n\n # Create the resource group\n az group create \\\n --name $resourceGroupName \\\n --location $location \\\n --only-show-errors 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$resourceGroupName] resource group successfully created in the [$subscriptionName] subscription\"\n else\n echo \"Failed to create [$resourceGroupName] resource group in the [$subscriptionName] subscription\"\n exit\n fi\nelse\n echo \"[$resourceGroupName] resource group already exists in the [$subscriptionName] subscription\"\nfi\n\n# Get the user principal name of the current user\necho \"Retrieving the user principal name of the current user from the [$tenantId] Azure AD tenant...\"\nuserPrincipalName=$(az account show \\\n --query user.name \\\n --output tsv \\\n --only-show-errors)\nif [[ -n $userPrincipalName ]]; then\n echo \"[$userPrincipalName] user principal name successfully retrieved from the [$tenantId] Azure AD tenant\"\nelse\n echo \"Failed to retrieve the user principal name of the current user from the [$tenantId] Azure AD tenant\"\n exit\nfi\n\n# Retrieve the objectId of the user in the Azure AD tenant used by AKS for user authentication\necho \"Retrieving the objectId of the [$userPrincipalName] user principal name from the [$tenantId] Azure AD tenant...\"\nuserObjectId=$(az ad user show \\\n --id $userPrincipalName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $userObjectId ]]; then\n echo \"[$userObjectId] objectId successfully retrieved for the [$userPrincipalName] user principal name\"\nelse\n echo \"Failed to retrieve the objectId of the [$userPrincipalName] user principal name\"\n exit\nfi\n\n# Create AKS cluster if does not exist\necho \"Checking if [$aksName] aks cluster actually exists in the [$resourceGroupName] resource group...\"\n\naz aks show \\\n --name $aksName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nnotExists=$?\n\nif [[ $notExists != 0 || $update == 1 ]]; then\n\n if [[ $notExists != 0 ]]; then\n echo \"No [$aksName] aks cluster actually exists in the [$resourceGroupName] resource group\"\n else\n echo \"[$aksName] aks cluster already exists in the [$resourceGroupName] resource group. Updating the cluster...\"\n fi\n\n # Validate the Bicep template\n if [[ $validateTemplate == 1 ]]; then\n if [[ $useWhatIf == 1 ]]; then\n # Execute a deployment What-If operation at resource group scope.\n echo \"Previewing changes deployed by [$template] Bicep template...\"\n az deployment group what-if \\\n --only-show-errors \\\n --resource-group $resourceGroupName \\\n --template-file $template \\\n --parameters $parameters \\\n --parameters prefix=$prefix \\\n location=$location \\\n userId=$userObjectId \\\n aksClusterKubernetesVersion=$kubernetesVersion\n\n if [[ $? == 0 ]]; then\n echo \"[$template] Bicep template validation succeeded\"\n else\n echo \"Failed to validate [$template] Bicep template\"\n exit\n fi\n else\n # Validate the Bicep template\n echo \"Validating [$template] Bicep template...\"\n output=$(az deployment group validate \\\n --only-show-errors \\\n --resource-group $resourceGroupName \\\n --template-file $template \\\n --parameters $parameters \\\n --parameters prefix=$prefix \\\n location=$location \\\n userId=$userObjectId \\\n aksClusterKubernetesVersion=$kubernetesVersion)\n\n if [[ $? 
== 0 ]]; then\n echo \"[$template] Bicep template validation succeeded\"\n else\n echo \"Failed to validate [$template] Bicep template\"\n echo $output\n exit\n fi\n fi\n fi\n\n if [[ $deploy == 1 ]]; then\n # Deploy the Bicep template\n echo \"Deploying [$template] Bicep template...\"\n az deployment group create \\\n --only-show-errors \\\n --resource-group $resourceGroupName \\\n --only-show-errors \\\n --template-file $template \\\n --parameters $parameters \\\n --parameters prefix=$prefix \\\n location=$location \\\n userId=$userObjectId \\\n aksClusterKubernetesVersion=$kubernetesVersion 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$template] Bicep template successfully provisioned\"\n else\n echo \"Failed to provision the [$template] Bicep template\"\n exit\n fi\n else\n echo \"Skipping the deployment of the [$template] Bicep template\"\n exit\n fi\nelse\n echo \"[$aksName] aks cluster already exists in the [$resourceGroupName] resource group\"\nfi\n\n# Retrieve the resource id of the AKS cluster\necho \"Retrieving the resource id of the [$aksName] AKS cluster...\"\naksClusterId=$(az aks show \\\n --name \"$aksName\" \\\n --resource-group \"$resourceGroupName\" \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $aksClusterId ]]; then\n echo \"Resource id of the [$aksName] AKS cluster successfully retrieved\"\nelse\n echo \"Failed to retrieve the resource id of the [$aksName] AKS cluster\"\n exit\nfi\n\n# Assign Azure Kubernetes Service RBAC Cluster Admin role to the current user\nrole=\"Azure Kubernetes Service RBAC Cluster Admin\"\necho \"Checking if [$userPrincipalName] user has been assigned to [$role] role on the [$aksName] AKS cluster...\"\ncurrent=$(az role assignment list \\\n --only-show-errors \\\n --assignee $userObjectId \\\n --scope $aksClusterId \\\n --query \"[?roleDefinitionName=='$role'].roleDefinitionName\" \\\n --output tsv 2>/dev/null)\n\nif [[ $current == \"Owner\" ]] || [[ $current == \"Contributor\" ]] || [[ $current == \"$role\" ]]; then\n echo \"[$userPrincipalName] user is already assigned to the [$current] role on the [$aksName] AKS cluster\"\nelse\n echo \"[$userPrincipalName] user is not assigned to the [$role] role on the [$aksName] AKS cluster\"\n echo \"Assigning the [$userPrincipalName] user to the [$role] role on the [$aksName] AKS cluster...\"\n\n az role assignment create \\\n --role \"$role\" \\\n --assignee $userObjectId \\\n --scope $aksClusterId \\\n --only-show-errors 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$userPrincipalName] user successfully assigned to the [$role] role on the [$aksName] AKS cluster\"\n else\n echo \"Failed to assign the [$userPrincipalName] user to the [$role] role on the [$aksName] AKS cluster\"\n exit\n fi\nfi\n\n# Assign Azure Kubernetes Service Cluster Admin Role role to the current user\nrole=\"Azure Kubernetes Service Cluster Admin Role\"\necho \"Checking if [$userPrincipalName] user has been assigned to [$role] role on the [$aksName] AKS cluster...\"\ncurrent=$(az role assignment list \\\n --only-show-errors \\\n --assignee $userObjectId \\\n --scope $aksClusterId \\\n --query \"[?roleDefinitionName=='$role'].roleDefinitionName\" \\\n --output tsv 2>/dev/null)\n\nif [[ $current == \"Owner\" ]] || [[ $current == \"Contributor\" ]] || [[ $current == \"$role\" ]]; then\n echo \"[$userPrincipalName] user is already assigned to the [$current] role on the [$aksName] AKS cluster\"\nelse\n echo \"[$userPrincipalName] user is not assigned to the [$role] role on the [$aksName] AKS cluster\"\n echo \"Assigning the [$userPrincipalName] user to the [$role] role on the [$aksName] AKS cluster...\"\n\n az role assignment create \\\n --role \"$role\" \\\n --assignee $userObjectId \\\n --scope $aksClusterId \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$userPrincipalName] user successfully assigned to the [$role] role on the [$aksName] AKS cluster\"\n else\n echo \"Failed to assign the [$userPrincipalName] user to the [$role] role on the [$aksName] AKS cluster\"\n exit\n fi\nfi\n\n# Get the FQDN of the Azure Front Door endpoint\nazureFrontDoorEndpointFqdn=$(az deployment group show \\\n --name $deploymentName \\\n --resource-group $resourceGroupName \\\n --query properties.outputs.frontDoorEndpointFqdn.value \\\n --output tsv \\\n --only-show-errors)\n\nif [[ -n $azureFrontDoorEndpointFqdn ]]; then\n echo \"FQDN of the Azure Front Door endpoint: $azureFrontDoorEndpointFqdn\"\nelse\n echo \"Failed to get the FQDN of the Azure Front Door endpoint\"\n exit -1\nfi\n\n# Get the private link service name\nprivateLinkServiceName=$(az deployment group show \\\n --name $deploymentName \\\n --resource-group $resourceGroupName \\\n --query properties.outputs.privateLinkServiceName.value \\\n --output tsv \\\n --only-show-errors)\n\nif [[ -z $privateLinkServiceName ]]; then\n echo \"Failed to get the private link service name\"\n exit -1\nfi\n\n# Get the resource id of the Private Endpoint Connection\nprivateEndpointConnectionId=$(az network private-endpoint-connection list \\\n --name $privateLinkServiceName \\\n --resource-group $resourceGroupName \\\n --type Microsoft.Network/privateLinkServices \\\n --query [0].id \\\n --output tsv \\\n --only-show-errors)\n\nif [[ -n $privateEndpointConnectionId ]]; then\n echo \"Resource id of the Private Endpoint Connection: $privateEndpointConnectionId\"\nelse\n echo \"Failed to get the resource id of the Private Endpoint Connection\"\n exit -1\nfi\n\n# Approve the private endpoint connection\necho \"Approving [$privateEndpointConnectionId] private endpoint connection ID...\"\naz network private-endpoint-connection approve \\\n --name $privateLinkServiceName \\\n --resource-group $resourceGroupName \\\n --id $privateEndpointConnectionId \\\n --description \"Approved\" \\\n --only-show-errors 1>/dev/null\n\nif [[ $? 
== 0 ]]; then\n echo \"[$privateEndpointConnectionId] private endpoint connection ID successfully approved\"\nelse\n echo \"Failed to approve [$privateEndpointConnectionId] private endpoint connection ID\"\n exit -1\nfi \n \n The last steps of the Bash script perform the following actions: \n \n az deployment group show command: retrieves the name of the Azure Private Link Service from the outputs of the deployment. \n az network private-endpoint-connection list command: gets the resource id of the Azure Private Link Service. \n az network private-endpoint-connection approve approves the private endpoint connection from the Azure Front Door Premium resource. \n \n If you miss running these steps, Azure Front Door cannot invoke the httpbin web application via the Azure Private Link Service, and the  kubernetes-internal internal load balancer of the AKS cluster. \n \n Front Door Bicep Module \n \n The following table contains the code from the  frontDoor.bicep  Bicep module used to deploy and configure Azure Front Door Premium. \n \n // Parameters\n@description('Specifies the name of the Azure Front Door.')\nparam frontDoorName string\n\n@description('The name of the SKU to use when creating the Front Door profile.')\n@allowed([\n 'Standard_AzureFrontDoor'\n 'Premium_AzureFrontDoor'\n])\nparam frontDoorSkuName string = 'Premium_AzureFrontDoor'\n\n@description('Specifies the name of the Front Door user-defined managed identity.')\nparam managedIdentityName string\n\n@description('Specifies the send and receive timeout on forwarding request to the origin. When timeout is reached, the request fails and returns.')\nparam originResponseTimeoutSeconds int = 30\n\n@description('Specifies the name of the Azure Front Door Origin Group for the web application.')\nparam originGroupName string\n\n@description('Specifies the name of the Azure Front Door Origin for the web application.')\nparam originName string\n\n@description('Specifies the address of the origin. Domain names, IPv4 addresses, and IPv6 addresses are supported.This should be unique across all origins in an endpoint.')\nparam hostName string\n\n@description('Specifies the value of the HTTP port. Must be between 1 and 65535.')\nparam httpPort int = 80\n\n@description('Specifies the value of the HTTPS port. Must be between 1 and 65535.')\nparam httpsPort int = 443\n\n@description('Specifies the host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure Front Door origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default. This overrides the host header defined at Endpoint.')\nparam originHostHeader string\n\n@description('Specifies the priority of origin in given origin group for load balancing. Higher priorities will not be used for load balancing if any lower priority origin is healthy.Must be between 1 and 5.')\n@minValue(1)\n@maxValue(5)\nparam priority int = 1\n\n@description('Specifies the weight of the origin in a given origin group for load balancing. Must be between 1 and 1000.')\n@minValue(1)\n@maxValue(1000)\nparam weight int = 1000\n\n@description('Specifies whether to enable health probes to be made against backends defined under backendPools. 
Health probes can only be disabled if there is a single enabled backend in single enabled backend pool.')\n@allowed([\n 'Enabled'\n 'Disabled'\n])\nparam originEnabledState string = 'Enabled'\n\n@description('Specifies the resource id of a private link service.')\nparam privateLinkResourceId string\n\n@description('Specifies the number of samples to consider for load balancing decisions.')\nparam sampleSize int = 4\n\n@description('Specifies the number of samples within the sample period that must succeed.')\nparam successfulSamplesRequired int = 3\n\n@description('Specifies the additional latency in milliseconds for probes to fall into the lowest latency bucket.')\nparam additionalLatencyInMilliseconds int = 50\n\n@description('Specifies path relative to the origin that is used to determine the health of the origin.')\nparam probePath string = '/'\n\n@description('The custom domain name to associate with your Front Door endpoint.')\nparam customDomainName string\n\n@description('Specifies the health probe request type.')\n@allowed([\n 'GET'\n 'HEAD'\n 'NotSet'\n])\nparam probeRequestType string = 'GET'\n\n@description('Specifies the health probe protocol.')\n@allowed([\n 'Http'\n 'Https'\n 'NotSet'\n])\nparam probeProtocol string = 'Http'\n\n@description('Specifies the number of seconds between health probes.Default is 240 seconds.')\nparam probeIntervalInSeconds int = 60\n\n@description('Specifies whether to allow session affinity on this host. Valid options are Enabled or Disabled.')\n@allowed([\n 'Enabled'\n 'Disabled'\n])\nparam sessionAffinityState string = 'Disabled'\n\n@description('Specifies the endpoint name reuse scope. The default value is TenantReuse.')\n@allowed([\n 'NoReuse'\n 'ResourceGroupReuse'\n 'SubscriptionReuse'\n 'TenantReuse'\n])\nparam autoGeneratedDomainNameLabelScope string = 'TenantReuse'\n\n@description('Specifies the name of the Azure Front Door Route for the web application.')\nparam routeName string\n\n@description('Specifies a directory path on the origin that Azure Front Door can use to retrieve content from, e.g. contoso.cloudapp.net/originpath.')\nparam originPath string = '/'\n\n@description('Specifies the rule sets referenced by this endpoint.')\nparam ruleSets array = []\n\n@description('Specifies the list of supported protocols for this route')\nparam supportedProtocols array = [\n 'Http'\n 'Https'\n]\n\n@description('Specifies the route patterns of the rule.')\nparam routePatternsToMatch array = [ '/*' ]\n\n@description('Specifies the protocol this rule will use when forwarding traffic to backends.')\n@allowed([\n 'HttpOnly'\n 'HttpsOnly'\n 'MatchRequest'\n])\nparam forwardingProtocol string = 'HttpsOnly'\n\n@description('Specifies whether this route will be linked to the default endpoint domain.')\n@allowed([\n 'Enabled'\n 'Disabled'\n])\nparam linkToDefaultDomain string = 'Enabled'\n\n@description('Specifies whether to automatically redirect HTTP traffic to HTTPS traffic. Note that this is a easy way to set up this rule and it will be the first rule that gets executed.')\n@allowed([\n 'Enabled'\n 'Disabled'\n])\nparam httpsRedirect string = 'Enabled'\n\n@description('Specifies the name of the Azure Front Door Endpoint for the web application.')\nparam endpointName string\n\n@description('Specifies whether to enable use of this rule. 
Permitted values are Enabled or Disabled')\n@allowed([\n 'Enabled'\n 'Disabled'\n])\nparam endpointEnabledState string = 'Enabled'\n\n@description('Specifies the name of the Azure Front Door WAF policy.')\nparam wafPolicyName string\n\n@description('Specifies the WAF policy is in detection mode or prevention mode.')\n@allowed([\n 'Detection'\n 'Prevention'\n])\nparam wafPolicyMode string = 'Prevention'\n\n@description('Specifies if the policy is in enabled or disabled state. Defaults to Enabled if not specified.')\nparam wafPolicyEnabledState string = 'Enabled'\n\n@description('Specifies the list of managed rule sets to configure on the WAF.')\nparam wafManagedRuleSets array = []\n\n@description('Specifies the list of custom rulesto configure on the WAF.')\nparam wafCustomRules array = []\n\n@description('Specifies if the WAF policy managed rules will inspect the request body content.')\n@allowed([\n 'Enabled'\n 'Disabled'\n])\nparam wafPolicyRequestBodyCheck string = 'Enabled'\n\n@description('Specifies name of the security policy.')\nparam securityPolicyName string\n\n@description('Specifies the list of patterns to match by the security policy.')\nparam securityPolicyPatternsToMatch array = [ '/*' ]\n\n@description('Specifies the resource id of the Log Analytics workspace.')\nparam workspaceId string\n\n@description('Specifies the location.')\nparam location string = resourceGroup().location\n\n@description('Specifies the resource tags.')\nparam tags object\n\n@description('Specifies the name of the resource group that contains the key vault with custom domain\\'s certificate.')\nparam keyVaultResourceGroupName string = resourceGroup().name\n\n@description('Specifies the name of the Key Vault that contains the custom domain certificate.')\nparam keyVaultName string\n\n@description('Specifies the name of the Key Vault secret that contains the custom domain certificate.')\nparam keyVaultCertificateName string\n\n@description('Specifies the version of the Key Vault secret that contains the custom domain certificate. 
Set the value to an empty string to use the latest version.')\nparam keyVaultCertificateVersion string = ''\n\n@description('Specifies the TLS protocol version that will be used for Https')\nparam minimumTlsVersion string = 'TLS12'\n\n// Variables\nvar diagnosticSettingsName = 'diagnosticSettings'\nvar logCategories = [\n 'FrontDoorAccessLog'\n 'FrontDoorHealthProbeLog'\n 'FrontDoorWebApplicationFirewallLog'\n]\nvar metricCategories = [\n 'AllMetrics'\n]\nvar logs = [for category in logCategories: {\n category: category\n enabled: true\n retentionPolicy: {\n enabled: true\n days: 0\n }\n}]\nvar metrics = [for category in metricCategories: {\n category: category\n enabled: true\n retentionPolicy: {\n enabled: true\n days: 0\n }\n}]\n\n// Resources\nresource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = {\n scope: resourceGroup(keyVaultResourceGroupName)\n name: keyVaultName\n\n resource secret 'secrets' existing = {\n name: keyVaultCertificateName\n }\n}\n\nresource managedIdentity 'Microsoft.ManagedIdentity/userAssignedIdentities@2023-07-31-preview' existing = {\n name: managedIdentityName\n}\n\nresource frontDoor 'Microsoft.Cdn/profiles@2022-11-01-preview' = {\n name: frontDoorName\n location: 'Global'\n tags: tags\n sku: {\n name: frontDoorSkuName\n }\n identity: {\n type: 'UserAssigned'\n userAssignedIdentities: {\n '${managedIdentity.id}': {}\n }\n }\n properties: {\n originResponseTimeoutSeconds: originResponseTimeoutSeconds\n }\n}\n\nresource originGroup 'Microsoft.Cdn/profiles/origingroups@2022-11-01-preview' = {\n parent: frontDoor\n name: originGroupName\n properties: {\n loadBalancingSettings: {\n sampleSize: sampleSize\n successfulSamplesRequired: successfulSamplesRequired\n additionalLatencyInMilliseconds: additionalLatencyInMilliseconds\n }\n healthProbeSettings: {\n probePath: probePath\n probeRequestType: probeRequestType\n probeProtocol: probeProtocol\n probeIntervalInSeconds: probeIntervalInSeconds\n }\n sessionAffinityState: sessionAffinityState\n }\n}\n\nresource origin 'Microsoft.Cdn/profiles/origingroups/origins@2022-11-01-preview' = {\n parent: originGroup\n name: originName\n properties: {\n hostName: hostName\n httpPort: httpPort\n httpsPort: httpsPort\n originHostHeader: originHostHeader\n priority: priority\n weight: weight\n enabledState: originEnabledState\n sharedPrivateLinkResource: empty(privateLinkResourceId) ? 
{} : {\n privateLink: {\n id: privateLinkResourceId\n }\n privateLinkLocation: location\n status: 'Approved'\n requestMessage: 'Please approve this request to allow Front Door to access the container app'\n }\n enforceCertificateNameCheck: true\n }\n}\n\nresource endpoint 'Microsoft.Cdn/profiles/afdEndpoints@2022-11-01-preview' = {\n parent: frontDoor\n name: endpointName\n location: 'Global'\n properties: {\n autoGeneratedDomainNameLabelScope: toUpper(autoGeneratedDomainNameLabelScope)\n enabledState: endpointEnabledState\n }\n}\n\nresource route 'Microsoft.Cdn/profiles/afdEndpoints/routes@2022-11-01-preview' = {\n parent: endpoint\n name: routeName\n properties: {\n customDomains: [\n {\n id: customDomain.id\n }\n ]\n originGroup: {\n id: originGroup.id\n }\n originPath: originPath\n ruleSets: ruleSets\n supportedProtocols: supportedProtocols\n patternsToMatch: routePatternsToMatch\n forwardingProtocol: forwardingProtocol\n linkToDefaultDomain: linkToDefaultDomain\n httpsRedirect: httpsRedirect\n }\n dependsOn: [\n origin\n ]\n}\n\nresource secret 'Microsoft.Cdn/profiles/secrets@2023-07-01-preview' = {\n name: toLower(format('{0}-{1}-latest', keyVaultName, keyVaultCertificateName))\n parent: frontDoor\n properties: {\n parameters: {\n type: 'CustomerCertificate'\n useLatestVersion: (keyVaultCertificateVersion == '')\n secretVersion: keyVaultCertificateVersion\n secretSource: {\n id: keyVault::secret.id\n }\n }\n }\n}\n\nresource customDomain 'Microsoft.Cdn/profiles/customDomains@2023-07-01-preview' = {\n name: replace(customDomainName, '.', '-')\n parent: frontDoor\n properties: {\n hostName: customDomainName\n tlsSettings: {\n certificateType: 'CustomerCertificate'\n minimumTlsVersion: minimumTlsVersion\n secret: {\n id: secret.id\n }\n }\n }\n}\n\nresource wafPolicy 'Microsoft.Network/FrontDoorWebApplicationFirewallPolicies@2022-05-01' = {\n name: wafPolicyName\n location: 'Global'\n tags: tags\n sku: {\n name: frontDoorSkuName\n }\n properties: {\n policySettings: {\n enabledState: wafPolicyEnabledState\n mode: wafPolicyMode\n requestBodyCheck: wafPolicyRequestBodyCheck\n }\n managedRules: {\n managedRuleSets: wafManagedRuleSets\n }\n customRules: {\n rules: wafCustomRules\n }\n }\n}\n\nresource securityPolicy 'Microsoft.Cdn/profiles/securitypolicies@2022-11-01-preview' = {\n parent: frontDoor\n name: securityPolicyName\n properties: {\n parameters: {\n type: 'WebApplicationFirewall'\n wafPolicy: {\n id: wafPolicy.id\n }\n associations: [\n {\n domains: [\n {\n id: endpoint.id\n }\n {\n id: customDomain.id\n }\n ]\n patternsToMatch: securityPolicyPatternsToMatch\n }\n ]\n\n }\n }\n}\n\n// Diagnostics Settings\nresource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = {\n name: diagnosticSettingsName\n scope: frontDoor\n properties: {\n workspaceId: workspaceId\n logs: logs\n metrics: metrics\n }\n}\n\n// Outputs\noutput id string = frontDoor.id\noutput name string = frontDoor.name\noutput frontDoorEndpointFqdn string = endpoint.properties.hostName\noutput customDomainValidationDnsTxtRecordValue string = customDomain.properties.validationProperties.validationToken != null ? 
customDomain.properties.validationProperties.validationToken : ''\noutput customDomainValidationExpiry string = customDomain.properties.validationProperties.expirationDate\noutput customDomainDeploymentStatus string = customDomain.properties.deploymentStatus\noutput customDomainValidationState string = customDomain.properties.domainValidationState \n \n The Bicep module creates the following resources: \n \n Azure Front Door profile with a user-assigned managed identity. The identity has a Key Vault Administrator role assignment to let it read the TLS certificate as a secret from the Key Vault resource. \n Azure Front Door origin group with the specified name ( originGroupName ). It includes load balancing settings and health probe settings. \n Azure Front Door origin with the specified name ( originName ). It includes the origin's host name, HTTP and HTTPS ports, origin host header, priority, weight, enabled state, and any shared private link resource. \n Azure Front Door endpoint with the specified name ( endpointName ). It includes the auto-generated domain name label scope and enabled state. \n Azure Front Door route with the specified name ( routeName ). It includes the custom domains associated with the endpoint, origin group, origin path, rule sets, supported protocols, route patterns to match, forwarding protocol, link to default domain, and HTTPS redirect settings. \n Key Vault secret with the custom domain certificate specified ( keyVaultCertificateName ) and the latest version of the certificate. \n Azure Front Door custom domain with the specified name ( customDomainName ). It includes the custom domain host name, TLS settings with the customer certificate, and the Key Vault secret ID. \n Azure Front Door WAF policy with the specified name ( wafPolicyName ). It includes the WAF policy settings, managed rule sets, and custom rules. In particular, one of the custom rules blocks incoming requests when they contain the word  blockme  in the query string. \n Azure Front Door security policy with the specified name ( securityPolicyName ). It includes the security policy parameters, WAF policy association with the endpoint and custom domain, and patterns to match. \n Diagnostic settings for Azure Front Door with the specified name ( diagnosticSettingsName ). It includes the workspace ID, enabled logs (FrontDoorAccessLog, FrontDoorHealthProbeLog, and FrontDoorWebApplicationFirewallLog), and enabled metrics (AllMetrics). \n \n The module also defines several input parameters to customize the configuration, such as the Front Door name, SKU, origin group and origin names, origin details (hostname, ports, host header, etc.), custom domain name, routing settings, WAF policy details, security policy name, diagnostic settings, etc. \n Finally, the module provides several output variables, including the Front Door ID and name, Front Door endpoint FQDN, custom domain validation DNS TXT record value, custom domain validation expiry date, custom domain deployment status, and custom domain validation state. \n \n Deployment Script \n \n The sample makes use of a Deployment Script to run the  install-front-door-end-to-end-tls.sh  Bash script which installs the httpbin web application via YAML templates and the following packages to the AKS cluster via Helm. 
For more information on deployment scripts, see Use deployment scripts in Bicep \n \n NGINX Ingress Controller \n Cert-Manager \n Prometheus \n \n \n # Install kubectl\naz aks install-cli --only-show-errors\n\n# Get AKS credentials\naz aks get-credentials \\\n --admin \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --only-show-errors\n\n# Check if the cluster is private or not\nprivate=$(az aks show --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --query apiServerAccessProfile.enablePrivateCluster \\\n --output tsv)\n\n# Install Helm\ncurl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 -o get_helm.sh -s\nchmod 700 get_helm.sh\n./get_helm.sh &>/dev/null\n\n# Add Helm repos\nhelm repo add prometheus-community https://prometheus-community.github.io/helm-charts\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo add jetstack https://charts.jetstack.io\n\n# Update Helm repos\nhelm repo update\n\n# Install Prometheus\nif [[ \"$installPrometheusAndGrafana\" == \"true\" ]]; then\n echo \"Installing Prometheus and Grafana...\"\n helm install prometheus prometheus-community/kube-prometheus-stack \\\n --create-namespace \\\n --namespace prometheus \\\n --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \\\n --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false\nfi\n\n# Install NGINX ingress controller using the internal load balancer\nif [[ \"$nginxIngressControllerType\" == \"Unmanaged\" || \"$installNginxIngressController\" == \"true\" ]]; then\n if [[ \"$nginxIngressControllerType\" == \"Unmanaged\" ]]; then\n echo \"Installing unmanaged NGINX ingress controller on the internal load balancer...\"\n helm install nginx-ingress ingress-nginx/ingress-nginx \\\n --create-namespace \\\n --namespace ingress-basic \\\n --set controller.replicaCount=3 \\\n --set controller.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set defaultBackend.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set controller.metrics.enabled=true \\\n --set controller.metrics.serviceMonitor.enabled=true \\\n --set controller.metrics.serviceMonitor.additionalLabels.release=\"prometheus\" \\\n --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz \\\n --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-internal\"=true\n else\n echo \"Installing unmanaged NGINX ingress controller on the public load balancer...\"\n helm install nginx-ingress ingress-nginx/ingress-nginx \\\n --create-namespace \\\n --namespace ingress-basic \\\n --set controller.replicaCount=3 \\\n --set controller.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set defaultBackend.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set controller.metrics.enabled=true \\\n --set controller.metrics.serviceMonitor.enabled=true \\\n --set controller.metrics.serviceMonitor.additionalLabels.release=\"prometheus\" \\\n --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\n fi\nfi\n\n# Create values.yaml file for cert-manager\necho \"Creating values.yaml file for cert-manager...\"\ncat <<EOF >values.yaml\npodLabels:\n azure.workload.identity/use: \"true\"\nserviceAccount:\n labels:\n azure.workload.identity/use: \"true\"\nEOF\n\n# Install certificate manager\nif [[ 
\"$installCertManager\" == \"true\" ]]; then\n echo \"Installing cert-manager...\"\n helm install cert-manager jetstack/cert-manager \\\n --create-namespace \\\n --namespace cert-manager \\\n --set crds.enabled=true \\\n --set nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --values values.yaml\n\n # Create this cluster issuer only when the unmanaged NGINX ingress controller is installed and configured to use the AKS public load balancer\n if [[ -n \"$email\" && (\"$nginxIngressControllerType\" == \"Managed\" || \"$installNginxIngressController\" == \"true\") ]]; then\n echo \"Creating the letsencrypt-nginx cluster issuer for the unmanaged NGINX ingress controller...\"\n cat <<EOF | kubectl apply -f -\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-nginx\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: $email\n privateKeySecretRef:\n name: letsencrypt\n solvers:\n - http01:\n ingress:\n class: nginx\n podTemplate:\n spec:\n nodeSelector:\n \"kubernetes.io/os\": linux\nEOF\n fi\n\n # Create this cluster issuer only when the managed NGINX ingress controller is installed and configured to use the AKS public load balancer\n if [[ -n \"$email\" && \"$webAppRoutingEnabled\" == \"true\" ]]; then\n echo \"Creating the letsencrypt-webapprouting cluster issuer for the managed NGINX ingress controller...\"\n cat <<EOF | kubectl apply -f -\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-webapprouting\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: $email\n privateKeySecretRef:\n name: letsencrypt\n solvers:\n - http01:\n ingress:\n class: webapprouting.kubernetes.azure.com\n podTemplate:\n spec:\n nodeSelector:\n \"kubernetes.io/os\": linux\nEOF\n fi\n\n # Create cluster issuer\n if [[ -n \"$email\" && -n \"$dnsZoneResourceGroupName\" && -n \"$subscriptionId\" && -n \"$dnsZoneName\" && -n \"$certManagerClientId\" ]]; then\n echo \"Creating the letsencrypt-dns cluster issuer...\"\n cat <<EOF | kubectl apply -f -\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-dns\n namespace: kube-system\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: $email\n privateKeySecretRef:\n name: letsencrypt-dns\n solvers:\n - dns01:\n azureDNS:\n resourceGroupName: $dnsZoneResourceGroupName\n subscriptionID: $subscriptionId\n hostedZoneName: $dnsZoneName\n environment: AzurePublicCloud\n managedIdentity:\n clientID: $certManagerClientId\nEOF\n fi\nfi\n\n# Configure the managed NGINX ingress controller to use an internal Azure load balancer\nif [[ \"$nginxIngressControllerType\" == \"Managed\" ]]; then\n echo \"Creating a managed NGINX ingress controller configured to use an internal Azure load balancer...\"\n cat <<EOF | kubectl apply -f -\napiVersion: approuting.kubernetes.azure.com/v1alpha1\nkind: NginxIngressController\nmetadata:\n name: nginx-internal\nspec:\n controllerNamePrefix: nginx-internal\n ingressClassName: nginx-internal\n loadBalancerAnnotations: \n service.beta.kubernetes.io/azure-load-balancer-internal: \"true\"\nEOF\nfi\n\n# Create a namespace for the application\necho \"Creating the [$namespace] namespace...\"\nkubectl create namespace $namespace\n\n# Create the Secret Provider Class object\necho \"Creating the [$secretProviderClassName] secret provider lass object in the [$namespace] namespace...\"\ncat <<EOF | kubectl apply -n $namespace -f -\napiVersion: secrets-store.csi.x-k8s.io/v1\nkind: 
SecretProviderClass\nmetadata:\n name: $secretProviderClassName\nspec:\n provider: azure\n secretObjects:\n - secretName: $secretName\n type: kubernetes.io/tls\n data: \n - objectName: $keyVaultCertificateName\n key: tls.key\n - objectName: $keyVaultCertificateName\n key: tls.crt\n parameters:\n usePodIdentity: \"false\"\n useVMManagedIdentity: \"true\"\n userAssignedIdentityID: $csiDriverClientId\n keyvaultName: $keyVaultName\n objects: |\n array:\n - |\n objectName: $keyVaultCertificateName\n objectType: secret\n tenantId: $tenantId\nEOF\n\n# Create deployment and service in the namespace\necho \"Creating the sample deployment and service in the [$namespace] namespace...\"\ncat <<EOF | kubectl apply -n $namespace -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: httpbin\nspec:\n replicas: 3\n selector:\n matchLabels:\n app: httpbin\n template:\n metadata:\n labels:\n app: httpbin\n spec:\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: httpbin\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: httpbin\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - name: httpbin\n image: docker.io/kennethreitz/httpbin\n imagePullPolicy: IfNotPresent\n securityContext:\n allowPrivilegeEscalation: false\n resources:\n requests:\n memory: \"64Mi\"\n cpu: \"125m\"\n limits:\n memory: \"128Mi\"\n cpu: \"250m\"\n ports:\n - containerPort: 80\n env:\n - name: PORT\n value: \"80\"\n volumeMounts:\n - name: secrets-store-inline\n mountPath: \"/mnt/secrets-store\"\n readOnly: true\n volumes:\n - name: secrets-store-inline\n csi:\n driver: secrets-store.csi.k8s.io\n readOnly: true\n volumeAttributes:\n secretProviderClass: \"$secretProviderClassName\"\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: httpbin\nspec:\n ports:\n - port: 80\n targetPort: 80\n protocol: TCP\n type: ClusterIP\n selector:\n app: httpbin\nEOF\n\n# Determine the ingressClassName\nif [[ \"$nginxIngressControllerType\" == \"Managed\" ]]; then\n ingressClassName=\"nginx-internal\"\nelse\n ingressClassName=\"nginx\"\nfi\n\n# Create an ingress resource for the application\necho \"Creating an ingress in the [$namespace] namespace configured to use the [$ingressClassName] ingress class...\"\ncat <<EOF | kubectl apply -n $namespace -f -\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n name: httpbin\n annotations:\n nginx.ingress.kubernetes.io/proxy-connect-timeout: \"360\"\n nginx.ingress.kubernetes.io/proxy-send-timeout: \"360\"\n nginx.ingress.kubernetes.io/proxy-read-timeout: \"360\"\n nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: \"360\"\n external-dns.alpha.kubernetes.io/ingress-hostname-source: \"annotation-only\" # This entry tell ExternalDNS to only use the hostname defined in the annotation, hence not to create any DNS records for this ingress\nspec:\n ingressClassName: $ingressClassName\n tls:\n - hosts:\n - $hostname\n secretName: $secretName\n rules:\n - host: $hostname\n http:\n paths:\n - path: /\n pathType: Prefix\n backend:\n service:\n name: httpbin\n port:\n number: 80\nEOF\n\n# Create output as JSON file\necho '{}' |\n jq --arg x 'prometheus' '.prometheus=$x' |\n jq --arg x 'cert-manager' '.certManager=$x' |\n jq --arg x 'ingress-basic' '.nginxIngressController=$x' >$AZ_SCRIPTS_OUTPUT_PATH \n \n As you can note, when deploying the NGINX Ingress Controller via Helm, the 
service.beta.kubernetes.io/azure-load-balancer-internal  annotation is set to  true  to create the  kubernetes-internal  internal load balancer in the node resource group of the AKS cluster and expose the ingress controller service via a private IP address. \n The deployment script uses a  SecretProviderClass  to retrieve the TLS certificate from Azure Key Vault and generate the Kubernetes secret for the  ingress  object. The TLS certificate's common name must match the  ingress  hostname and the Azure Front Door custom domain. The Secrets Store CSI Driver for Key Vault only creates the Kubernetes secret that contains the TLS certificate when the  deployment  utilizing the  SecretProviderClass  in a volume definition is created. For more information, see Set up Secrets Store CSI Driver to enable NGINX Ingress Controller with TLS. \n The script uses YAML templates to create the  deployment  and  service  for the httpbin web application. You can modify the script to install your own application. In particular, an ingress is used to expose the application via the NGINX Ingress Controller over HTTPS, using the TLS certificate common name as the hostname. The ingress object can easily be modified to expose a different server via HTTPS and to provide a certificate for TLS termination. \n If you want to replace the NGINX ingress controller installed via Helm by the deployment script with the managed version installed by the application routing add-on, you can simply replace the  nginx   ingressClassName  in the  ingress  object with the name of the ingress class deployed by the application routing add-on, which, by default, is  webapprouting.kubernetes.azure.com . \n \n Alternative Solution \n \n Azure Private Link Service (PLS) is an infrastructure component that allows users to connect privately, via an Azure Private Endpoint (PE) in an Azure virtual network, to a Frontend IP Configuration associated with an internal or public Azure Load Balancer (ALB). With Private Link, service providers can securely expose their services to consumers, who can connect from within Azure or from on-premises without data exfiltration risks. \n Before Private Link Service integration, users who wanted private connectivity from on-premises or other virtual networks to their services in an Azure Kubernetes Service (AKS) cluster were required to create a Private Link Service (PLS) to reference the cluster Azure Load Balancer, like in this sample. The user would then create an Azure Private Endpoint (PE) to connect to the PLS to enable private connectivity. With the Azure Private Link Service Integration feature, a managed Azure Private Link Service (PLS) to the AKS cluster load balancer can be created automatically, and the user is only required to create Private Endpoint connections to it for private connectivity. You can expose a Kubernetes service via a Private Link Service using annotations, as illustrated in the sketch below. For more information, see Azure Private Link Service Integration. \n \n CI/CD and GitOps Considerations \n \n Azure Private Link Service Integration simplifies the creation of an Azure Private Link Service (PLS) when deploying Kubernetes services or ingress controllers via a classic CI/CD pipeline using Azure DevOps, GitHub Actions, Jenkins, or GitLab, as well as when using a GitOps approach with Argo CD or Flux v2. 
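As a minimal, hypothetical sketch of that annotation-based approach (the  httpbin-private  service name, the  httpbin-pls  Private Link Service name, and the  $namespace  variable are illustrative assumptions, not part of this sample), a Kubernetes service of type  LoadBalancer  could ask the Azure cloud provider to create a managed Private Link Service on the internal load balancer as follows:

# Hypothetical example: expose the httpbin pods through an internal load balancer and
# let the Azure Private Link Service Integration feature create a managed PLS for it.
cat <<EOF | kubectl apply -n $namespace -f -
apiVersion: v1
kind: Service
metadata:
  name: httpbin-private
  annotations:
    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
    service.beta.kubernetes.io/azure-pls-create: "true"
    service.beta.kubernetes.io/azure-pls-name: "httpbin-pls"
spec:
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: httpbin
EOF

Once the service is provisioned, the generated Private Link Service appears in the node resource group, and a Private Endpoint or a Front Door Premium private origin can then be connected to it and approved, just like the manually created Private Link Service used by this sample.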
\n For every workload that you expose via Azure Private Link Service (PLS) and Azure Front Door Premium, you need to create an origin group, an origin, an endpoint, a route, and, if you want to protect the workload with a WAF policy, a security policy. You can accomplish this task using the  az network front-door  Azure CLI commands in the CD pipeline used to deploy your service. \n \n Test the application \n \n If the deployment succeeds, and the private endpoint connection from the Azure Front Door Premium instance to the Azure Private Link Service (PLS) is approved, you should be able to access the AKS-hosted httpbin web application as follows: \n \n Navigate to the overview page of your Front Door Premium in the Azure Portal and copy the URL from the Endpoint hostname. \n Paste and open the URL in your favorite internet browser. You should see the user interface of the httpbin application. \n \n You can use the  bicep/test.sh  Bash script to simulate a few attacks and see the managed rule set and custom rule of the Azure Web Application Firewall in action. \n \n #!/bin/bash\n\n# Variables\nurl=\"<Front Door Endpoint Hostname URL>\"\n\n# Call REST API\necho \"Calling REST API...\"\ncurl -I -s \"$url\"\n\n# Simulate SQL injection\necho \"Simulating SQL injection...\"\ncurl -I -s \"${url}?users=ExampleSQLInjection%27%20--\"\n\n# Simulate XSS\necho \"Simulating XSS...\"\ncurl -I -s \"${url}?users=ExampleXSS%3Cscript%3Ealert%28%27XSS%27%29%3C%2Fscript%3E\"\n\n# A custom rule blocks any request with the word blockme in the querystring.\necho \"Simulating query string manipulation with the 'blockme' word in the query string...\"\ncurl -I -s \"${url}?task=blockme\" \n \n The Bash script should produce the following output, where the first call succeeds, while the remaining calls are blocked by the WAF policy configured in prevention mode. \n \n Calling REST API...\nHTTP/2 200\ncontent-length: 9593\ncontent-type: text/html; charset=utf-8\naccept-ranges: bytes\nvary: Accept-Encoding\naccess-control-allow-origin: *\naccess-control-allow-credentials: true\nx-azure-ref: 05mwQZAAAAADma91JbmU0TJqRqS2lyFurTUlMMzBFREdFMDYwOQA3YTk2NzZiMS0xZmRjLTQ0OWYtYmI1My1hNDUxMDVjNGZmYmM=\nx-cache: CONFIG_NOCACHE\ndate: Tue, 14 Mar 2023 12:47:33 GMT\n\nSimulating SQL injection...\nHTTP/2 403\nx-azure-ref: 05mwQZAAAAABaQCSGQToQT4tifYGpmsTmTUlMMzBFREdFMDYxNQA3YTk2NzZiMS0xZmRjLTQ0OWYtYmI1My1hNDUxMDVjNGZmYmM=\ndate: Tue, 14 Mar 2023 12:47:34 GMT\n\nSimulating XSS...\nHTTP/2 403\nx-azure-ref: 05mwQZAAAAAAJZzCrTmN4TLY+bZOxskzOTUlMMzBFREdFMDYxMwA3YTk2NzZiMS0xZmRjLTQ0OWYtYmI1My1hNDUxMDVjNGZmYmM=\ndate: Tue, 14 Mar 2023 12:47:33 GMT\n\nSimulating query string manipulation with the 'blockme' word in the query string...\nHTTP/2 403\nx-azure-ref: 05mwQZAAAAADAle0hOg4FTYH6Q1LHIP50TUlMMzBFREdFMDYyMAA3YTk2NzZiMS0xZmRjLTQ0OWYtYmI1My1hNDUxMDVjNGZmYmM=\ndate: Tue, 14 Mar 2023 12:47:33 GMT \n \n Front Door WAF policies and Application Gateway WAF policies can be configured to run in the following two modes: \n \n Detection mode : When run in detection mode, WAF takes no action other than monitoring and logging the request and its matched WAF rule to the WAF logs. You can turn on logging diagnostics for Front Door. When you use the portal, go to the Diagnostics section. \n \n Prevention mode : In prevention mode, WAF takes the specified action if a request matches a rule. If a match is found, no further rules with lower priority are evaluated. Any matched requests are also logged in the WAF logs. 
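If you want to experiment with the two modes, the following sketch shows one possible way to toggle the mode of an existing Front Door WAF policy with the Azure CLI; the  <waf-policy-name>  and  <resource-group-name>  placeholders are assumptions you must replace with your own values, and the  az network front-door waf-policy  command group may require the  front-door  CLI extension.

# Hypothetical example: switch an existing Front Door WAF policy between modes.
wafPolicyName="<waf-policy-name>"
resourceGroupName="<resource-group-name>"

# Detection mode: matching requests are only logged, never blocked.
az network front-door waf-policy update \
  --name $wafPolicyName \
  --resource-group $resourceGroupName \
  --mode Detection

# Prevention mode: matching requests are blocked and logged.
az network front-door waf-policy update \
  --name $wafPolicyName \
  --resource-group $resourceGroupName \
  --mode Prevention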
Any matched requests are also logged in the WAF logs.

For more information, see Azure Web Application Firewall on Azure Front Door.

Review deployed resources

You can use the Azure portal or the Azure CLI to list the deployed resources in the resource group:

az resource list --resource-group <resource-group-name>

You can also use the following PowerShell cmdlet to list the deployed resources in the resource group:

Get-AzResource -ResourceGroupName <resource-group-name>

Clean up resources

You can delete the resource group using the following Azure CLI command when you no longer need the resources you created. This will remove all the Azure resources.

az group delete --name <resource-group-name>

Alternatively, you can use the following PowerShell cmdlet to delete the resource group and all the Azure resources.

Remove-AzResourceGroup -Name <resource-group-name>
,"width":1280,"height":560,"altText":"zonal-node-pools-az-down.png"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NDUwNWkyOUM4MENBRTVGMkFCNzQ2?revision=7\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NDUwNWkyOUM4MENBRTVGMkFCNzQ2?revision=7","title":"zonal-node-pools-after.png","associationType":"BODY","width":1200,"height":560,"altText":"zonal-node-pools-after.png"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NTA5MmlFMTI3NzI3NDBDOURGODNF?revision=7\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NTA5MmlFMTI3NzI3NDBDOURGODNF?revision=7","title":"zonal-node-pools-zrs-disk-before.png","associationType":"BODY","width":1200,"height":560,"altText":"zonal-node-pools-zrs-disk-before.png"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NTA5NmkwNDNGNUNBQkIwRjM3RUJF?revision=7\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NTA5NmkwNDNGNUNBQkIwRjM3RUJF?revision=7","title":"zonal-node-pools-zrs-disk-after.png","associationType":"BODY","width":1200,"height":560,"altText":"zonal-node-pools-zrs-disk-after.png"},"BlogTopicMessage:message:4036254":{"__typename":"BlogTopicMessage","subject":"A Practical Guide to Zone Redundant AKS Clusters and Storage","conversation":{"__ref":"Conversation:conversation:4036254"},"id":"message:4036254","revisionNum":7,"uid":4036254,"depth":0,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"author":{"__ref":"User:user:988334"},"teaser@stripHtml({\"removeProcessingText\":true,\"truncateLength\":-1})":" This sample explains how you can create a zone redundant AKS cluster and the implications of each approach on the deployment strategy and configuration of the persistent volumes used by the workloads. \n \n   ","introduction":"","metrics":{"__typename":"MessageMetrics","views":16592},"postTime":"2024-01-22T09:02:33.249-08:00","lastPublishTime":"2025-02-11T01:02:59.863-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" Resilience is a critical aspect of any modern application infrastructure. When failures occur, a resilient solution is capable of recovering quickly and maintaining functionality. To enhance the intra-region resiliency of an Azure Kubernetes Service (AKS) cluster, creating a zone redundant cluster is of utmost importance. This article explains how you can create a zone redundant AKS cluster and the implications of each approach on the deployment strategy and configuration of the persistent volumes used by the workloads. Here \n re you can find the companion code for this article. \n   \n Prerequisites \n \n An active Azure subscription. If you don't have one, create a free Azure account before you begin. \n Visual Studio Code installed on one of the supported platforms along with the HashiCorp Terraform. \n Azure CLI version 2.56.0 or later installed. To install or upgrade, see Install Azure CLI. \n aks-preview  Azure CLI extension of version 0.5.140 or later installed \n The deployment must be started by a user who has sufficient permissions to assign roles, such as a  User Access Administrator  or  Owner . 
\n Your Azure account also needs  Microsoft.Resources/deployments/write  permissions at the subscription level. \n \n   \n Availability Zones \n Availability Zones are separate groups of datacenters in a region. They are located close enough to have fast connections to each other, with a round-trip latency of less than 2ms. However, they are also far enough apart to minimize the chances of all zones being affected by local issues or bad weather. Each availability zone has its own power, cooling, and networking systems. If one zone goes down, the other zones can still support regional services, capacity, and high availability. This setup helps ensure that your data remains synchronized and accessible even during unexpected events. The diagram below illustrates examples of Azure regions, with Regions 1 and 2 being equipped with availability zones. \n   \n Not all the Azure regions support availability zones. For more information on which regions support Availability Zones, you can refer to the Availability zone service and regional support documentation. \n   \n Azure services with availability zone support \n Azure is continuously expanding its range of services that support availability zones. These services can be divided into three types: zonal, zone-redundant, and always-available. \n   \n \n Zonal services allow you to deploy resources to a specific availability zone of your choice, ensuring optimal performance and low latency. Resiliency is achieved by replicating applications and data across multiple zones within the same region. For example, you can align the agent nodes of an AKS node pool and the managed disks created via persistent volume claims to a specific zone, increasing resilience by deploying multiple instances of resources across different zones. \n Zone-redundant services automatically distribute or replicate resources across multiple availability zones. This ensures that even if one zone fails, the data remains highly available. For instance, you can create a zone-redundant VMSS-based node pool where the nodes are spread across availability zones within a region. \n Always-available services are resilient to both zone-wide and region-wide outages. These services are available across all Azure geographies and provide uninterrupted availability. For a comprehensive list of always-available services, also known as non-regional services, you can refer to the Products available by region documentation on Azure. \n \n   \n Maximizing Resilience with Availability Zones \n By utilizing Availability Zones, the resilience of an AKS cluster can be greatly improved. When creating an AKS cluster, spreading the AKS agent nodes across multiple zones can enhance the cluster's resilience within a region. This involves distributing AKS agent nodes across physically separate data centers, ensuring that nodes in one pool continue running even if another zone encounters failures. If co-locality requirements exist, there are two options available: \n   \n \n Regular VMSS-based AKS Deployment: This involves deploying the AKS cluster or one of its node pools into a single Availability Zone, ensuring proximity and minimizing internode latency. \n Proximity Placement Groups: Proximity Placement Groups (PPG) can be utilized to minimize internode latency while maintaining zone redundancy. Nodes within a PPG are placed in the same data center, ensuring optimal communication and minimizing latency. 
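As a sketch of the two co-locality options above, the following Azure CLI commands, which assume an existing AKS cluster and resource group with hypothetical names, add a user-mode node pool pinned to a single availability zone, and a second node pool that is additionally associated with a Proximity Placement Group through the --ppg parameter (the PPG resource ID is a placeholder).

# Deploy a user-mode node pool into a single availability zone (zone 1)
az aks nodepool add \
  --cluster-name myAksCluster \
  --resource-group myResourceGroup \
  --name zonalpool \
  --mode User \
  --node-count 3 \
  --zones 1

# Deploy a node pool into a single zone and co-locate its nodes in a Proximity Placement Group
az aks nodepool add \
  --cluster-name myAksCluster \
  --resource-group myResourceGroup \
  --name ppgpool \
  --mode User \
  --node-count 3 \
  --zones 1 \
  --ppg /subscriptions/<subscription-id>/resourceGroups/myResourceGroup/providers/Microsoft.Compute/proximityPlacementGroups/myPpg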
Creating Zone Redundant AKS Clusters

There are two approaches to creating a zone redundant AKS cluster:

Zone Redundant Node Pool: This approach involves creating a zone redundant node pool, where nodes are spread across multiple Availability Zones. This ensures that the node pool can withstand failures in any zone while maintaining the desired functionality.
AKS Cluster with three Node Pools: In this approach, an AKS cluster is created with three node pools, each assigned to a different availability zone. This ensures that the cluster has redundancy across zones.

Let's explore the implications of these two strategies on workload deployment and storage configuration.

Azure Storage redundancy

Azure Storage always stores three copies of your data so that your information is protected from planned and unplanned events, including transient hardware failures, network or power outages, and massive natural disasters. Redundancy ensures that your storage account meets its availability and durability targets even in case of failures.
When deciding which redundancy option is best for your scenario, consider the tradeoffs between lower costs and higher availability. The factors that help determine which redundancy option you should choose include:

How your data is replicated within the primary region.
Whether your data is replicated to a second region that is geographically distant from the primary region, to protect against regional disasters (geo-replication).
Whether your application requires read access to the replicated data in the secondary region if the primary region becomes unavailable for any reason (geo-replication with read access).

Data in Azure Storage is always replicated three times in the primary region. Azure Storage offers two options for how your data is replicated in the primary region: locally redundant storage (LRS) and zone-redundant storage (ZRS). For more information, see Azure Storage redundancy.

Locally redundant storage (LRS)

Locally redundant storage (LRS) is the lowest-cost redundancy option and offers the least durability compared to other options. LRS protects your data against server rack and drive failures. However, if a disaster such as fire or flooding occurs within the data center, all replicas of a storage account using LRS may be lost or unrecoverable. To mitigate this risk, Microsoft recommends using zone-redundant storage (ZRS), geo-redundant storage (GRS), or geo-zone-redundant storage (GZRS). A write request to a storage account that is using LRS happens synchronously. The write operation returns successfully only after the data is written to all three replicas. The following diagram shows how your data is replicated within a single data center with LRS:

An LRS managed disk can only be attached and used by a virtual machine located in the same availability zone. LRS is a good choice for the following scenarios:

If your application stores data that can be easily reconstructed if data loss occurs, you may opt for LRS.
If your application is restricted to replicating data only within a country or region due to data governance requirements, you may opt for LRS. In some cases, the paired regions across which the data is geo-replicated may be in another country or region. For more information on paired regions, see Azure regions.
If your scenario is using Azure unmanaged disks, you may opt for LRS.
While it's possible to create a storage account for Azure unmanaged disks that uses GRS, it isn't recommended due to potential issues with consistency over asynchronous geo-replication.

LRS is the redundancy model used by the built-in storage classes in Azure Kubernetes Service (AKS), such as managed-csi and managed-csi-premium. For more information, see Use the Azure Disk Container Storage Interface (CSI) driver in Azure Kubernetes Service (AKS).

Zone-redundant storage (ZRS)

Zone-redundant storage (ZRS) replicates your storage account synchronously across three Azure availability zones in the primary region. Each availability zone is a separate physical location with independent power, cooling, and networking. ZRS offers durability for storage resources of at least 99.9999999999% (12 9's) over a given year. With ZRS, your data is still accessible for both read and write operations even if a zone becomes unavailable. If a zone becomes unavailable, Azure undertakes networking updates, such as DNS repointing. These updates may affect your application if you access data before the updates have completed. When designing applications for ZRS, follow practices for transient fault handling, including implementing retry policies with exponential back-off.
A write request to a storage account that is using ZRS happens synchronously. The write operation returns successfully only after the data is written to all replicas across the three availability zones. If an availability zone is temporarily unavailable, the operation returns successfully after the data is written to all available zones. Microsoft recommends using ZRS in the primary region for scenarios that require high availability. ZRS is also recommended for restricting replication of data to a particular country or region to meet data governance requirements. Microsoft recommends using ZRS for Azure Files workloads. If a zone becomes unavailable, no remounting of Azure file shares from the connected clients is required.
The following diagram shows how your data is replicated across availability zones in the primary region with ZRS:

ZRS provides excellent performance, low latency, and resiliency for your data if it becomes temporarily unavailable. However, ZRS by itself may not protect your data against a regional disaster where multiple zones are permanently affected. For protection against regional disasters, Microsoft recommends using geo-zone-redundant storage (GZRS), which uses ZRS in the primary region and also geo-replicates your data to a secondary region.
The archive tier for Blob Storage isn't currently supported for ZRS, GZRS, or RA-GZRS accounts. Unmanaged disks don't support ZRS or GZRS. For more information about which regions support ZRS, see Azure regions with availability zones.

Zone-redundant storage for managed disks

Zone-redundant storage (ZRS) synchronously replicates your Azure managed disk across three Azure availability zones in the region you select. Each availability zone is a separate physical location with independent power, cooling, and networking. ZRS disks provide at least 99.9999999999% (12 9's) of durability over a given year. A ZRS managed disk can be attached to a virtual machine in a different availability zone. ZRS disks are currently not available in all Azure regions.
For more information on ZRS disks, see Zone Redundant Storage (ZRS) option for Azure Disks for high availability \n   \n AKS cluster with Zone-Redundant Node Pools \n The first strategy entails deploying an AKS cluster with zone-redundant node pools, where the nodes are distributed evenly across the availability zones within a region. The diagram below illustrates an AKS cluster with a zone-redundant system-mode node pool and a zone-redundant user-mode node pool. \n   \n Deploy an AKS cluster with Zone-Redundant Node Pools using Azure CLI \n When creating a cluster using the az aks create command, the  --zones  parameter allows you to specify the availability zones for deploying agent nodes. Here's an example that demonstrates creating an AKS cluster, with a total of three nodes. One node is deployed in zone  1 , another in zone  2 , and the third in zone  3 . For more information, see Create an AKS cluster across availability zones. \n \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Check if the resource group already exists\necho \"Checking if [\"$resourceGroupName\"] resource group actually exists in the [$subscriptionName] subscription...\"\n\naz group show --name $resourceGroupName --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [\"$resourceGroupName\"] resource group actually exists in the [$subscriptionName] subscription\"\n echo \"Creating [\"$resourceGroupName\"] resource group in the [$subscriptionName] subscription...\"\n\n # create the resource group\n az group create \\\n --name $resourceGroupName \\\n --location $location \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[\"$resourceGroupName\"] resource group successfully created in the [$subscriptionName] subscription\"\n else\n echo \"Failed to create [\"$resourceGroupName\"] resource group in the [$subscriptionName] subscription\"\n exit -1\n fi\nelse\n echo \"[\"$resourceGroupName\"] resource group already exists in the [$subscriptionName] subscription\"\nfi\n\n# Check if log analytics workspace exists and retrieve its resource id\necho \"Retrieving [\"$logAnalyticsName\"] Log Analytics resource id...\"\naz monitor log-analytics workspace show \\\n --name $logAnalyticsName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [\"$logAnalyticsName\"] log analytics workspace actually exists in the [\"$resourceGroupName\"] resource group\"\n echo \"Creating [\"$logAnalyticsName\"] log analytics workspace in the [\"$resourceGroupName\"] resource group...\"\n\n # Create the log analytics workspace\n az monitor log-analytics workspace create \\\n --name $logAnalyticsName \\\n --resource-group $resourceGroupName \\\n --identity-type SystemAssigned \\\n --sku $logAnalyticsSku \\\n --location $location \\\n --only-show-errors\n\n if [[ $? 
== 0 ]]; then\n echo \"[\"$logAnalyticsName\"] log analytics workspace successfully created in the [\"$resourceGroupName\"] resource group\"\n else\n echo \"Failed to create [\"$logAnalyticsName\"] log analytics workspace in the [\"$resourceGroupName\"] resource group\"\n exit -1\n fi\nelse\n echo \"[\"$logAnalyticsName\"] log analytics workspace already exists in the [\"$resourceGroupName\"] resource group\"\nfi\n\n# Retrieve the log analytics workspace id\nworkspaceResourceId=$(az monitor log-analytics workspace show \\\n --name $logAnalyticsName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $workspaceResourceId ]]; then\n echo \"Successfully retrieved the resource id for the [\"$logAnalyticsName\"] log analytics workspace\"\nelse\n echo \"Failed to retrieve the resource id for the [\"$logAnalyticsName\"] log analytics workspace\"\n exit -1\nfi\n\n# Check if the client virtual network already exists\necho \"Checking if [$virtualNetworkName] virtual network actually exists in the [$resourceGroupName] resource group...\"\naz network vnet show \\\n --name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$virtualNetworkName] virtual network actually exists in the [$resourceGroupName] resource group\"\n echo \"Creating [$virtualNetworkName] virtual network in the [$resourceGroupName] resource group...\"\n\n # Create the client virtual network\n az network vnet create \\\n --name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --location $location \\\n --address-prefixes $virtualNetworkAddressPrefix \\\n --subnet-name $systemSubnetName \\\n --subnet-prefix $systemSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$virtualNetworkName] virtual network successfully created in the [$resourceGroupName] resource group\"\n else\n echo \"Failed to create [$virtualNetworkName] virtual network in the [$resourceGroupName] resource group\"\n exit -1\n fi\nelse\n echo \"[$virtualNetworkName] virtual network already exists in the [$resourceGroupName] resource group\"\nfi\n\n# Check if the user subnet already exists\necho \"Checking if [$userSubnetName] user subnet actually exists in the [$virtualNetworkName] virtual network...\"\naz network vnet subnet show \\\n --name $userSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$userSubnetName] user subnet actually exists in the [$virtualNetworkName] virtual network\"\n echo \"Creating [$userSubnetName] user subnet in the [$virtualNetworkName] virtual network...\"\n\n # Create the user subnet\n az network vnet subnet create \\\n --name $userSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --address-prefix $userSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$userSubnetName] user subnet successfully created in the [$virtualNetworkName] virtual network\"\n else\n echo \"Failed to create [$userSubnetName] user subnet in the [$virtualNetworkName] virtual network\"\n exit -1\n fi\nelse\n echo \"[$userSubnetName] user subnet already exists in the [$virtualNetworkName] virtual network\"\nfi\n\n# Check if the pod subnet already exists\necho \"Checking if [$podSubnetName] pod subnet actually exists in the [$virtualNetworkName] virtual network...\"\naz network vnet subnet show \\\n --name $podSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$podSubnetName] pod subnet actually exists in the [$virtualNetworkName] virtual network\"\n echo \"Creating [$podSubnetName] pod subnet in the [$virtualNetworkName] virtual network...\"\n\n # Create the pod subnet\n az network vnet subnet create \\\n --name $podSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --address-prefix $podSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$podSubnetName] pod subnet successfully created in the [$virtualNetworkName] virtual network\"\n else\n echo \"Failed to create [$podSubnetName] pod subnet in the [$virtualNetworkName] virtual network\"\n exit -1\n fi\nelse\n echo \"[$podSubnetName] pod subnet already exists in the [$virtualNetworkName] virtual network\"\nfi\n\n# Check if the bastion subnet already exists\necho \"Checking if [$bastionSubnetName] bastion subnet actually exists in the [$virtualNetworkName] virtual network...\"\naz network vnet subnet show \\\n --name $bastionSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$bastionSubnetName] bastion subnet actually exists in the [$virtualNetworkName] virtual network\"\n echo \"Creating [$bastionSubnetName] bastion subnet in the [$virtualNetworkName] virtual network...\"\n\n # Create the bastion subnet\n az network vnet subnet create \\\n --name $bastionSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --address-prefix $bastionSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$bastionSubnetName] bastion subnet successfully created in the [$virtualNetworkName] virtual network\"\n else\n echo \"Failed to create [$bastionSubnetName] bastion subnet in the [$virtualNetworkName] virtual network\"\n exit -1\n fi\nelse\n echo \"[$bastionSubnetName] bastion subnet already exists in the [$virtualNetworkName] virtual network\"\nfi\n\n# Retrieve the system subnet id\nsystemSubnetId=$(az network vnet subnet show \\\n --name $systemSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $systemSubnetId ]]; then\n echo \"Successfully retrieved the resource id for the [$systemSubnetName] subnet\"\nelse\n echo \"Failed to retrieve the resource id for the [$systemSubnetName] subnet\"\n exit -1\nfi\n\n# Retrieve the user subnet id\nuserSubnetId=$(az network vnet subnet show \\\n --name $userSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $userSubnetId ]]; then\n echo \"Successfully retrieved the resource id for the [$userSubnetName] subnet\"\nelse\n echo \"Failed to retrieve the resource id for the [$userSubnetName] subnet\"\n exit -1\nfi\n\n# Retrieve the pod subnet id\npodSubnetId=$(az network vnet subnet show \\\n --name $podSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $podSubnetId ]]; then\n echo \"Successfully retrieved the resource id for the [$podSubnetName] subnet\"\nelse\n echo \"Failed to retrieve the resource id for the [$podSubnetName] subnet\"\n exit -1\nfi\n\n# Get the last Kubernetes version available in the region\nkubernetesVersion=$(az aks get-versions \\\n --location $location \\\n --query \"values[?isPreview==null].version | sort(@) | [-1]\" \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\n# Create AKS cluster\necho \"Checking if [\"$aksClusterName\"] aks cluster actually exists in the [\"$resourceGroupName\"] resource group...\"\n\naz aks show --name $aksClusterName --resource-group $resourceGroupName &>/dev/null\n\nif [[ $? 
!= 0 ]]; then\n echo \"No [\"$aksClusterName\"] aks cluster actually exists in the [\"$resourceGroupName\"] resource group\"\n echo \"Creating [\"$aksClusterName\"] aks cluster in the [\"$resourceGroupName\"] resource group...\"\n\n # Create the aks cluster\n az aks create \\\n --name $aksClusterName \\\n --resource-group $resourceGroupName \\\n --service-cidr $serviceCidr \\\n --dns-service-ip $dnsServiceIp \\\n --os-sku $osSku \\\n --node-osdisk-size $osDiskSize \\\n --node-osdisk-type $osDiskType \\\n --vnet-subnet-id $systemSubnetId \\\n --nodepool-name $systemNodePoolName \\\n --pod-subnet-id $podSubnetId \\\n --enable-cluster-autoscaler \\\n --node-count $nodeCount \\\n --min-count $minCount \\\n --max-count $maxCount \\\n --max-pods $maxPods \\\n --location $location \\\n --kubernetes-version $kubernetesVersion \\\n --ssh-key-value $sshKeyValue \\\n --node-vm-size $nodeSize \\\n --enable-addons monitoring \\\n --workspace-resource-id $workspaceResourceId \\\n --network-policy $networkPolicy \\\n --network-plugin $networkPlugin \\\n --service-cidr $serviceCidr \\\n --enable-managed-identity \\\n --enable-workload-identity \\\n --enable-oidc-issuer \\\n --enable-aad \\\n --enable-azure-rbac \\\n --aad-admin-group-object-ids $aadProfileAdminGroupObjectIDs \\\n --nodepool-taints CriticalAddonsOnly=true:NoSchedule \\\n --nodepool-labels nodePoolMode=system created=AzureCLI osDiskType=ephemeral osType=Linux\\\n --nodepool-tags osDiskType=ephemeral osDiskType=ephemeral osType=Linux \\\n --tags created=AzureCLI \\\n --only-show-errors \\\n --zones 1 2 3 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[\"$aksClusterName\"] aks cluster successfully created in the [\"$resourceGroupName\"] resource group\"\n else\n echo \"Failed to create [\"$aksClusterName\"] aks cluster in the [\"$resourceGroupName\"] resource group\"\n exit -1\n fi\nelse\n echo \"[\"$aksClusterName\"] aks cluster already exists in the [\"$resourceGroupName\"] resource group\"\nfi\n\n# Check if the user node pool exists\necho \"Checking if [\"$aksClusterName\"] aks cluster actually has a user node pool...\"\naz aks nodepool show \\\n --name $userNodePoolName \\\n --cluster-name $aksClusterName \\\n --resource-group $resourceGroupName &>/dev/null\n\nif [[ $? == 0 ]]; then\n echo \"A node pool called [$userNodePoolName] already exists in the [$aksClusterName] AKS cluster\"\nelse\n echo \"No node pool called [$userNodePoolName] actually exists in the [$aksClusterName] AKS cluster\"\n echo \"Creating [$userNodePoolName] node pool in the [$aksClusterName] AKS cluster...\"\n\n az aks nodepool add \\\n --name $userNodePoolName \\\n --mode $mode \\\n --cluster-name $aksClusterName \\\n --resource-group $resourceGroupName \\\n --enable-cluster-autoscaler \\\n --eviction-policy $evictionPolicy \\\n --os-type $osType \\\n --os-sku $osSku \\\n --node-vm-size $vmSize \\\n --node-osdisk-size $osDiskSize \\\n --node-osdisk-type $osDiskType \\\n --node-count $nodeCount \\\n --min-count $minCount \\\n --max-count $maxCount \\\n --max-pods $maxPods \\\n --tags osDiskType=managed osType=Linux \\\n --labels osDiskType=ephemeral osType=Linux \\\n --vnet-subnet-id $userSubnetId \\\n --pod-subnet-id $podSubnetId \\\n --labels nodePoolMode=user created=AzureCLI osDiskType=ephemeral osType=Linux\\\n --tags osDiskType=ephemeral osDiskType=ephemeral osType=Linux \\\n --zones 1 2 3 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$userNodePoolName] node pool successfully created in the [$aksClusterName] AKS cluster\"\n else\n echo \"Failed to create the [$userNodePoolName] node pool in the [$aksClusterName] AKS cluster\"\n exit -1\n fi\nfi\n\n# Use the following command to configure kubectl to connect to the new Kubernetes cluster\necho \"Getting access credentials configure kubectl to connect to the [\"$aksClusterName\"] AKS cluster...\"\naz aks get-credentials \\\n --name $aksClusterName \\\n --resource-group $resourceGroupName \\\n --overwrite-existing\n\nif [[ $? == 0 ]]; then\n echo \"Credentials for the [\"$aksClusterName\"] cluster successfully retrieved\"\nelse\n echo \"Failed to retrieve the credentials for the [\"$aksClusterName\"] cluster\"\n exit -1\nfi \n \n The variables used by the script are defined in a separate file included in the script: \n   \n # Azure Kubernetes Service (AKS) cluster\nprefix=\"Horus\"\naksClusterName=\"${prefix}Aks\"\nresourceGroupName=\"${prefix}RG\"\nlocation=\"WestEurope\"\nosSku=\"AzureLinux\"\nosDiskSize=50\nosDiskType=\"Ephemeral\"\nsystemNodePoolName=\"system\"\n\n# Virtual Network\nvirtualNetworkName=\"${prefix}VNet\"\nvirtualNetworkAddressPrefix=\"10.0.0.0/8\"\nsystemSubnetName=\"SystemSubnet\"\nsystemSubnetPrefix=\"10.240.0.0/16\"\nuserSubnetName=\"UserSubnet\"\nuserSubnetPrefix=\"10.241.0.0/16\"\npodSubnetName=\"PodSubnet\"\npodSubnetPrefix=\"10.242.0.0/16\"\nbastionSubnetName=\"AzureBastionSubnet\"\nbastionSubnetPrefix=\"10.243.2.0/24\"\n\n# AKS variables\ndnsServiceIp=\"172.16.0.10\"\nserviceCidr=\"172.16.0.0/16\"\naadProfileAdminGroupObjectIDs=\"4e4d0501-e693-4f3e-965b-5bec6c410c03\"\n\n# Log Analytics\nlogAnalyticsName=\"${prefix}LogAnalytics\"\nlogAnalyticsSku=\"PerGB2018\"\n\n# Node count, node size, and ssh key location for AKS nodes\nnodeSize=\"Standard_D4ds_v4\"\n\nsshKeyValue=\"~/.ssh/id_rsa.pub\"\n\n# Network policy\nnetworkPolicy=\"azure\"\nnetworkPlugin=\"azure\"\n\n# Node count variables\nnodeCount=3\nminCount=3\nmaxCount=20\nmaxPods=100\n\n# Node pool variables\nuserNodePoolName=\"user\"\nevictionPolicy=\"Delete\"\nvmSize=\"Standard_D4ds_v4\" #Standard_F8s_v2, Standard_D4ads_v5\nosType=\"Linux\"\nmode=\"User\"\n\n# SubscriptionName and tenantId of the current subscription\nsubscriptionName=$(az account show --query name --output tsv)\ntenantId=$(az account show --query tenantId --output tsv)\n\n# Kubernetes sample\nnamespace=\"disk-test\"\nappLabels=(\"lrs-nginx\" \"zrs-nginx\")\n \n   \n Spread Pods across Zones using Pod Topology Spread Constraints \n When deploying pods to an AKS cluster that spans multiple availability zones, it is essential to ensure optimal distribution and resilience. To achieve this, you can utilize the Pod Topology Spread Constraints Kubernetes feature. By implementing Pod Topology Spread Constraints, you gain granular control over how pods are spread across your AKS cluster, taking into account failure-domains like regions, availability zones, and nodes. Specifically, you can create constraints that span pod replicas across availability zones, as well as across different nodes within a single availability zone. By doing so, you can achieve several benefits. First, spreading pod replicas across availability zones ensures that your application remains available even if an entire zone goes down. Second, distributing pods across different nodes within a zone enhances fault tolerance, as it minimizes the impact of node failures or maintenance activities. 
By using Pod Topology Spread Constraints, you can maximize the resilience and availability of your applications in an AKS cluster. This approach optimizes resource utilization, minimizes downtime, and delivers a robust infrastructure for your workloads across multiple availability zones and nodes.

LRS and ZRS Persistent Volumes

When using a single node pool that spans three availability zones, it is important to use Zone Redundant Storage for managed disks (ZRS) for persistent volumes. This ensures data availability and reliability: if a pod replica attaches a persistent volume in one availability zone and is then rescheduled to another availability zone, the pod cannot reattach the managed disk if the disk is configured with Locally Redundant Storage (LRS). To prevent this issue, it is recommended to configure persistent volume claims to use a storage class that is set up to utilize Zone Redundant Storage for managed disks (ZRS). By doing so, you can ensure the persistence and availability of your data across availability zones. For more information on persistent volume claims, you can refer to the official Kubernetes documentation.

Custom ZRS Storage Classes

The Azure Disks Container Storage Interface (CSI) driver is a CSI specification-compliant driver used by Azure Kubernetes Service (AKS) to manage the lifecycle of Azure Disks. The CSI is a standard for exposing arbitrary block and file storage systems to containerized workloads on Kubernetes. By adopting and using CSI, AKS can write, deploy, and iterate plug-ins to expose new or improve existing storage systems in Kubernetes. Using CSI drivers in AKS avoids having to touch the core Kubernetes code and wait for its release cycles. AKS provides the following built-in storage classes for Azure Disks and Azure Files:

For Azure Disks:
managed-csi: Uses Azure Standard SSD locally redundant storage (LRS) to create a managed disk.
managed-csi-premium: Uses Azure Premium LRS to create a managed disk.

For Azure Files:
azurefile-csi: Uses Azure Standard Storage to create an Azure file share.
azurefile-csi-premium: Uses Azure Premium Storage to create an Azure file share.

While these built-in storage classes are suitable for most scenarios, they use Standard_LRS and Premium_LRS, which employ Locally Redundant Storage (LRS). As explained in the previous section, when deploying pods of a single workload across the availability zones in a zone-redundant AKS cluster or node pool, it is necessary to utilize Zone Redundant Storage for managed disks (ZRS) for persistent volumes.
Zone Redundant Storage (ZRS) synchronously replicates your Azure managed disk across three availability zones within your selected region. Each availability zone is a separate physical location with independent power, cooling, and networking. With ZRS disks, you benefit from at least 99.9999999999% (12 9's) of durability over a year and the ability to recover from failures in availability zones.
In case a zone goes down, a ZRS disk can be attached to a virtual machine (VM) in a different zone. \n To create a custom storage class using  StandardSSD_ZRS  or  Premium_ZRS  managed disks, you can use the following example: \n \n apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n name: managed-csi-premium-zrs\nprovisioner: disk.csi.azure.com\nparameters:\n skuname: Premium_ZRS\nreclaimPolicy: Delete\nvolumeBindingMode: WaitForFirstConsumer\nallowVolumeExpansion: true \n \n For more information on the parameters for the Azure Disk CSI Driver, refer to the Azure Disk CSI Driver Parameters documentation. \n Similarly, you can create a storage class using the Azure Files CSI Driver with  Standard_ZRS ,  Standard_RAGZRS , and  Premium_ZRS  storage options, ensuring that data copies are stored across different zones within a region: \n \n apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n name: azurefile-csi-premium-zrs\nmountOptions:\n- mfsymlinks\n- actimeo=30\nparameters:\n skuName: Premium_ZRS\n enableLargeFileShares: \"true\"\nprovisioner: file.csi.azure.com\nreclaimPolicy: Delete\nvolumeBindingMode: Immediate \n \n For more information about the parameters for the Azure Files CSI Driver, refer to the Azure File CSI Driver Parameters documentation. \n   \n Deploy a Workload that uses ZRS Storage to a Zone-Redundant Node Pool \n If you plan to deploy a workload to AKS which make use of the Azure Disks CSI Driver to create and attach Kubernetes persistent volumes based on ZRS managed disks, you can use the following strategy: \n   \n \n Create a Kubernetes deployment, for example using a YAML manifest. \n Use node selectors or node affinity to constraint the Kubernetes Scheduler to run the pods of each deployments on the agent nodes of a specific user-mode zone-redundant node pool using the labels of the nodes. \n Create a persistent volume claim which references a storage class which makes use of Zone Redundant Storage for managed disks (ZRS), for example the  managed-csi-premium-zrs  storage class we introduced in the previous section. \n When deploying pods to a zone-redundant node pool, it is essential to ensure optimal distribution and resilience. To achieve this, you can utilize the Pod Topology Spread Constraints Kubernetes feature. By implementing Pod Topology Spread Constraints, you gain granular control over how pods are spread across your AKS cluster, taking into account failure-domains like regions, availability zones, and nodes. In this scenario, you can create constraints that span pod replicas across availability zones, as well as across different nodes within a single availability zone. By doing so, you can achieve several benefits. First, spreading pod replicas across availability zones ensures that your application remains available even if an entire zone goes down. Second, distributing pods across different nodes within a zone enhances fault tolerance, as it minimizes the impact of node failures or maintenance activities. By using Pod Topology Spread Constraints, you can maximize the resilience and availability of your applications in an AKS cluster. This approach optimizes resource utilization, minimizes downtime, and delivers a robust infrastructure for your workloads across multiple availability zones and nodes. \n \n   \n Test Workload resiliency of an AKS cluster Zone-Redundant Node Pools \n In this test, we simulate a situation where agent nodes in a particular availability zone experience a failure and become unavailable. 
The goal is to ensure that the application can still operate properly on the agent nodes in the remaining availability zones. To avoid any interference from the cluster autoscaler during the test and to guarantee that the user-mode zone-redundant node pool has exactly three nodes, each in a different availability zone, you can run the following bash script. This script disables the cluster autoscaler for each node pool and manually sets the number of nodes to three. \n   \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\nnodeCount=3\n\n# Retrieve the node count for the current node pool\necho \"Retrieving the node count for the [$userNodePoolName] node pool...\"\ncount=$(az aks nodepool show \\\n --name $userNodePoolName \\\n --cluster-name $aksClusterName \\\n --resource-group $resourceGroupName \\\n --query count \\\n --output tsv \\\n --only-show-errors)\n\n# Disable autoscaling for the current node pool\necho \"Disabling autoscaling for the [$userNodePoolName] node pool...\"\naz aks nodepool update \\\n --cluster-name $aksClusterName \\\n --name $userNodePoolName \\\n --resource-group $resourceGroupName \\\n --disable-cluster-autoscaler \\\n --only-show-errors 1>/dev/null\n\n# Run this command only if the current node count is not equal to three\nif [[ $count -ne $nodeCount ]]; then\n # Scale the current node pool to three nodes\n echo \"Scaling the [$userNodePoolName] node pool to $nodeCount nodes...\"\n az aks nodepool scale \\\n --cluster-name $aksClusterName \\\n --name $userNodePoolName \\\n --resource-group $resourceGroupName \\\n --node-count $nodeCount \\\n --only-show-errors 1>/dev/null\nelse\n echo \"The [$userNodePoolName] node pool is already scaled to $nodeCount nodes\"\nfi\n \n   \n In this test, we will create two deployments, each consisting of a single pod replica: \n   \n \n lrs-nginx: The pod for this workload mounts an LRS (Locally Redundant Storage) Azure Disk created in the node resource group of the AKS (Azure Kubernetes Service) cluster. \n zrs-nginx: The pod for this workload mounts a ZRS (Zone-Redundant Storage) Azure Disk created in the node resource group of the AKS cluster. \n \n   \n The objective is to observe the behavior of the two pods when we simulate a failure of the availability zone that hosts their agent nodes. To set up the necessary Kubernetes objects, you can use the provided script to create the following: \n   \n \n The  disk-test  namespace. \n Two persistent volume claims (PVC):  lrs-pvc-azure-disk  and  zrs-pvc-azure-disk . \n Two deployments:  lrs-nginx  and  zrs-nginx . 
\n \n \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Check if namespace exists in the cluster\nresult=$(kubectl get namespace -o jsonpath=\"{.items[?(@.metadata.name=='$namespace')].metadata.name}\")\n\nif [[ -n $result ]]; then\n echo \"$namespace namespace already exists in the cluster\"\nelse\n echo \"$namespace namespace does not exist in the cluster\"\n echo \"creating $namespace namespace in the cluster...\"\n kubectl create namespace $namespace\nfi\n\n# Create the managed-csi-premium-zrs storage class\nkubectl apply -f managed-csi-premium-zrs.yml\n\n# Create the lrs-pvc-azure-disk persistent volume claim\nkubectl apply -f lrs-pvc.yml -n $namespace\n\n# Create the lrs-nginx deployment\nkubectl apply -f lrs-deploy.yml -n $namespace\n\n# Create the zrs-pvc-azure-disk persistent volume claim\nkubectl apply -f zrs-pvc.yml -n $namespace\n\n# Create the zrs-nginx deployment\nkubectl apply -f zrs-deploy.yml -n $namespace \n \n The following YAML manifest defines the  lrs-pvc-azure-disk  persistent volume claim. This PVC utilizes the built-in  managed-csi-premium  storage class, which uses  Premium_LRS  storage. \n \n apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: lrs-pvc-azure-disk\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n storageClassName: managed-csi-premium \n \n Instead, the following YAML manifest defines the  zrs-pvc-azure-disk  persistent volume claim. This PVC references the user-defined  managed-csi-premium-zrs  storage class, which employs  Premium_ZRS  storage. \n \n apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: zrs-pvc-azure-disk\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n storageClassName: managed-csi-premium-zrs \n \n The following YAML manifest defines the  lrs-nginx  deployment. Here are some key observations: \n   \n \n The deployment consists of a single replica pod. \n The Pod Topology Spread Constraints is configured to span pod replicas across availability zones, as well as across different nodes within a single availability zone. \n The deployment uses a the  lrs-pvc-azure-disk  persistent volume claim to create and attach a zonal LRS Premium SSD managed disk in the same availability zone as the mounting pod. The Azure disk is created in the node resource group which contains all of the infrastructure resources associated with the AKS cluster. The managed disk has the same name of the corresponding Kubernetes persistent volume. 
\n \n \n apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: lrs-nginx\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: lrs-nginx\n template:\n metadata:\n labels:\n app: lrs-nginx\n spec:\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: lrs-nginx\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: lrs-nginx\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine\n name: nginx-azuredisk\n resources:\n requests:\n memory: \"64Mi\"\n cpu: \"125m\"\n limits:\n memory: \"128Mi\"\n cpu: \"250m\"\n command:\n - \"/bin/sh\"\n - \"-c\"\n - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done\n volumeMounts:\n - name: lrs-azure-disk\n mountPath: \"/mnt/azuredisk\"\n readOnly: false\n volumes:\n - name: lrs-azure-disk\n persistentVolumeClaim:\n claimName: lrs-pvc-azure-disk \n \n The following YAML manifest defines the  zrs-nginx  deployment. Here are some important observations: \n   \n \n The deployment consists of a single pod replica. \n Pod Topology Spread Constraints are configured to distribute pod replicas across availability zones and different nodes within a single availability zone. \n The deployment uses the  zrs-pvc-azure-disk  persistent volume claim to create and attach a zonal ZRS Premium SSD managed disk. This disk is replicated across three availability zones. The Azure disk is created in the node resource group, which contains all the infrastructure resources associated with the AKS cluster. The managed disk has the same name as the corresponding Kubernetes persistent volume. \n \n \n apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: zrs-nginx\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: zrs-nginx\n template:\n metadata:\n labels:\n app: zrs-nginx\n spec:\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: zrs-nginx\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: zrs-nginx\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine\n name: nginx-azuredisk\n resources:\n requests:\n memory: \"64Mi\"\n cpu: \"125m\"\n limits:\n memory: \"128Mi\"\n cpu: \"250m\"\n command:\n - \"/bin/sh\"\n - \"-c\"\n - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done\n volumeMounts:\n - name: zrs-azure-disk\n mountPath: \"/mnt/azuredisk\"\n readOnly: false\n volumes:\n - name: zrs-azure-disk\n persistentVolumeClaim:\n claimName: zrs-pvc-azure-disk \n \n Please note that the system-mode node pool is tainted with  CriticalAddonsOnly=true:NoSchedule . This taint prevents pods without the corresponding toleration from running on the agent nodes of this node pool. In our test deployments, we did not include this toleration. Therefore, when we create these deployments, the Kubernetes scheduler will place their pods on the agent nodes of the  user  node pool, which does not have any taint. \n The diagram below illustrates how the pods are distributed across the agent nodes of a zone-redundant node pool, along with the corresponding Locally Redundant Storage (LRS) and Zone-Redundant Storage (ZRS) managed disks. 
\n Here are some important observations: \n   \n \n Locally redundant storage (LRS) replicates Azure Disk data three times within a single data center, hence within a single availability zone. LRS is the most cost-effective redundancy option but offers less durability compared to other options. While LRS protects against server rack and drive failures, a disaster within the data center could result in the loss or unrecoverability of all replicas of a storage account using LRS. \n Zone-redundant storage (ZRS) replicates Azure Disk data synchronously across three Azure availability zones within the same region. With ZRS, your data remains accessible for both read and write operations even if one zone becomes unavailable. However, during zone unavailability, Azure may perform networking updates such as DNS repointing, which could temporarily impact your application. To design applications for ZRS, it is advised to follow best practices for handling transient faults, including implementing retry policies with exponential back-off. \n \n   \n Run the following command to retrieve information about the nodes in your Kubernetes cluster, including additional labels related to region and zone topology. \n \n kubectl get nodes -L kubernetes.azure.com/agentpool,topology.kubernetes.io/region,topology.kubernetes.io/zone \n \n The command should return a tabular output like the following that includes information about each node in the cluster, with additional columns for the specified labels  kubernetes.azure.com/agentpool ,  topology.kubernetes.io/region , and  topology.kubernetes.io/zone . \n \n NAME STATUS ROLES AGE VERSION AGENTPOOL REGION ZONE\naks-system-26825036-vmss000000 Ready agent 22h v1.28.3 system westeurope westeurope-1\naks-system-26825036-vmss000001 Ready agent 22h v1.28.3 system westeurope westeurope-2\naks-system-26825036-vmss000002 Ready agent 22h v1.28.3 system westeurope westeurope-3\naks-user-27342081-vmss000000 Ready agent 22h v1.28.3 user westeurope westeurope-1\naks-user-27342081-vmss000001 Ready agent 22h v1.28.3 user westeurope westeurope-2\naks-user-27342081-vmss000002 Ready agent 22h v1.28.3 user westeurope westeurope-3 \n \n You can note that the agent nodes of the  user  zone-redundant node pool are located in different availability zones. Now run the following  kubectl  command that returns information about the pods in the  disk-test  namespace. \n \n kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,IP:.status.podIP,HOSTIP:.status.hostIP,NODE:.spec.nodeName -n disk-test \n \n This command provides information on the pods' names and private IP addresses, as well as the hosting node's name and private IP address. The two pods were scheduled to run on two agent nodes, each in a separate availability zone: \n \n NAME STATUS IP HOSTIP NODE\nlrs-nginx-5bc4498b56-9kb6k Running 10.242.0.62 10.241.0.4 aks-user-27342081-vmss000000\nzrs-nginx-b86595984-ctfr2 Running 10.242.0.97 10.241.0.5 aks-user-27342081-vmss000001 \n \n Let's observe the behavior when simulating a failure of the availability zones hosting the two pods. Since the cluster consists of three nodes, each in a separate availability zone, we can simulate an availability zone failure by cordoning and draining the nodes that host the two pods. This will force the Kubernetes scheduler to reschedule the pods on agent nodes in a different availability zone. To achieve this, we can use the Kubernetes concepts of cordon and drain. 
Cordoning a node marks it as unschedulable, preventing new pods from being scheduled on that node. Draining a node gracefully evicts all pods running on the node, causing them to be rescheduled on other available nodes. By cordoning and draining the nodes hosting the two pods, we can simulate an availability zone failure and observe how the Kubernetes scheduler handles rescheduling the pods in different availability zones. You can run the following script to cordon and drain the nodes hosting the lrs-nginx-* and zrs-nginx-* pods:

#!/bin/bash

# Variables
source ./00-variables.sh

for appLabel in ${appLabels[@]}; do
  # Retrieve the name of the pod with the given app label
  echo "Retrieving the name of the pod with app=[$appLabel] label..."
  podName=$(kubectl get pods -l app=$appLabel -n $namespace -o jsonpath='{.items[*].metadata.name}')

  if [ -n "$podName" ]; then
    echo "Successfully retrieved the [$podName] pod name for the [$appLabel] app label"
  else
    echo "Failed to retrieve the pod name for the [$appLabel] app label"
    exit 1
  fi

  # Retrieve the name of the node running the pod
  nodeName=$(kubectl get pods -l app=$appLabel -n $namespace -o jsonpath='{.items[*].spec.nodeName}')

  if [ -n "$nodeName" ]; then
    echo "The [$podName] pod runs on the [$nodeName] agent node"
  else
    echo "Failed to retrieve the name of the node running the [$podName] pod"
    exit 1
  fi

  # Retrieve the availability zone of the node running the pod
  agentPoolZone=$(kubectl get nodes $nodeName -o jsonpath='{.metadata.labels.topology\.kubernetes\.io/zone}')

  if [ -n "$agentPoolZone" ]; then
    echo "The [$nodeName] agent node is in the [$agentPoolZone] availability zone"
  else
    echo "Failed to retrieve the availability zone of the [$nodeName] agent node"
    exit 1
  fi

  # Retrieve the name of the agent pool for the node running the pod
  agentPoolName=$(kubectl get nodes $nodeName -o jsonpath='{.metadata.labels.agentpool}')

  if [ -n "$agentPoolName" ]; then
    echo "The [$nodeName] agent node belongs to the [$agentPoolName] agent pool"
  else
    echo "Failed to retrieve the name of the agent pool for the [$nodeName] agent node"
    exit 1
  fi

  # Cordon the node running the pod
  echo "Cordoning the [$nodeName] node..."
  kubectl cordon $nodeName

  # Drain the node running the pod
  echo "Draining the [$nodeName] node..."
  kubectl drain $nodeName --ignore-daemonsets --delete-emptydir-data --force
done

The kubectl cordon command marks a node as unschedulable, while kubectl drain evicts the pods running on it. The two options passed to kubectl drain deserve a brief explanation:

--ignore-daemonsets: This option tells kubectl drain to skip DaemonSet-managed pods when evicting workloads. By default, kubectl drain refuses to proceed if any DaemonSet-managed pods are running on the node, because the DaemonSet controller would immediately recreate them. Using --ignore-daemonsets bypasses this check and drains the node regardless of any DaemonSet pods present.
--delete-emptydir-data: This option allows kubectl drain to evict pods that use emptyDir volumes. By default, kubectl drain refuses to evict such pods because the data stored in emptyDir volumes is lost when a pod is removed from its node. Using the --delete-emptydir-data flag acknowledges that this local data will be deleted when the node is drained.
This can be helpful if you want to clean up the data within emptyDir volumes before evacuating a node.

Exercise caution when draining a node, especially when using these options, and carefully consider the impact on the running pods and on any data they may contain. The script execution will produce an output similar to the following. It shows the cordoning and draining of each node hosting one of the two pods, simulating an availability zone failure.

Successfully retrieved the [lrs-nginx-5bc4498b56-9kb6k] pod name for the [lrs-nginx] app label
The [lrs-nginx-5bc4498b56-9kb6k] pod runs on the [aks-user-27342081-vmss000000] agent node
The [aks-user-27342081-vmss000000] agent node is in the [westeurope-1] availability zone
The [aks-user-27342081-vmss000000] agent node belongs to the [user] agent pool
Cordoning the [aks-user-27342081-vmss000000] node...
node/aks-user-27342081-vmss000000 cordoned
Draining the [aks-user-27342081-vmss000000] node...
node/aks-user-27342081-vmss000000 already cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-dqqxw, kube-system/azure-cns-vn58p, kube-system/azure-npm-7zkpm, kube-system/cloud-node-manager-dn5nn, kube-system/csi-azuredisk-node-7qngx, kube-system/csi-azurefile-node-6ch9q, kube-system/kube-proxy-cq2t8, kube-system/microsoft-defender-collector-ds-tc2mj, kube-system/microsoft-defender-publisher-ds-cpnd6
evicting pod disk-test/zne-nginx-01-5f8d87566-hpzh8
evicting pod disk-test/lrs-nginx-5bc4498b56-9kb6k
pod/lrs-nginx-5bc4498b56-9kb6k evicted
node/aks-user-27342081-vmss000000 drained
Successfully retrieved the [zrs-nginx-b86595984-ctfr2] pod name for the [zrs-nginx] app label
The [zrs-nginx-b86595984-ctfr2] pod runs on the [aks-user-27342081-vmss000001] agent node
The [aks-user-27342081-vmss000001] agent node is in the [westeurope-2] availability zone
The [aks-user-27342081-vmss000001] agent node belongs to the [user] agent pool
Cordoning the [aks-user-27342081-vmss000001] node...
node/aks-user-27342081-vmss000001 cordoned
Draining the [aks-user-27342081-vmss000001] node...
node/aks-user-27342081-vmss000001 already cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-ncsv5, kube-system/azure-cns-vbh6q, kube-system/azure-npm-rjk7r, kube-system/cloud-node-manager-579lc, kube-system/csi-azuredisk-node-6hllf, kube-system/csi-azurefile-node-84z82, kube-system/kube-proxy-8q6kh, kube-system/microsoft-defender-collector-ds-tjdwd, kube-system/microsoft-defender-publisher-ds-cfzqf
evicting pod disk-test/zrs-nginx-b86595984-ctfr2
evicting pod disk-test/zne-nginx-02-7fb7769948-j4vjj
pod/zrs-nginx-b86595984-ctfr2 evicted
node/aks-user-27342081-vmss000001 drained

Run the following command to retrieve information about the nodes in your Kubernetes cluster, including additional labels related to region and zone topology.
kubectl get nodes -L kubernetes.azure.com/agentpool,topology.kubernetes.io/region,topology.kubernetes.io/zone

The command should return a tabular output like the following:

NAME STATUS ROLES AGE VERSION AGENTPOOL REGION ZONE
aks-system-26825036-vmss000000 Ready agent 22h v1.28.3 system westeurope westeurope-1
aks-system-26825036-vmss000001 Ready agent 22h v1.28.3 system westeurope westeurope-2
aks-system-26825036-vmss000002 Ready agent 22h v1.28.3 system westeurope westeurope-3
aks-user-27342081-vmss000000 Ready,SchedulingDisabled agent 22h v1.28.3 user westeurope westeurope-1
aks-user-27342081-vmss000001 Ready,SchedulingDisabled agent 22h v1.28.3 user westeurope westeurope-2
aks-user-27342081-vmss000002 Ready agent 22h v1.28.3 user westeurope westeurope-3

From the output, you can observe that the nodes that were previously running the lrs-nginx-* and zrs-nginx-* pods are now in a SchedulingDisabled status. This indicates that the Kubernetes scheduler is unable to schedule new pods onto these nodes. However, the aks-user-27342081-vmss000002 node is still in a Ready status, so it can accept new pod assignments. Now run the following kubectl command that returns information about the pods in the disk-test namespace.

kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,IP:.status.podIP,HOSTIP:.status.hostIP,NODE:.spec.nodeName -n disk-test

The lrs-nginx-* pod is now in a Pending status, while the zrs-nginx-* pod runs on the only node of the user node pool that is still in a Ready status.

NAME STATUS IP HOSTIP NODE
lrs-nginx-5bc4498b56-744wd Pending <none> <none> <none>
zrs-nginx-b86595984-mwnkn Running 10.242.0.77 10.241.0.6 aks-user-27342081-vmss000002

The following diagram shows what happened to the pods after their hosting nodes were cordoned and drained.

The lrs-nginx-* pod ended up in a Pending status because the Kubernetes scheduler couldn't find a node on which to run it: the pod needs to mount the LRS Azure disk, but there are no nodes in a Ready status in the availability zone hosting the disk. There is a cluster node in this availability zone, but it belongs to the system-mode node pool, which is tainted with CriticalAddonsOnly=true:NoSchedule, and the pod doesn't have the necessary toleration for this taint. The following command provides more information on why the pod ended up in a Pending status.

kubectl describe pod -l app=lrs-nginx -n disk-test

This command should return an output like the following. The Events section specifies the reasons why the pod could not be scheduled on any node.
\n \n Name: lrs-nginx-5bc4498b56-744wd\nNamespace: disk-test\nPriority: 0\nService Account: default\nNode: <none>\nLabels: app=lrs-nginx\n pod-template-hash=5bc4498b56\nAnnotations: <none>\nStatus: Pending\nIP:\nIPs: <none>\nControlled By: ReplicaSet/lrs-nginx-5bc4498b56\nContainers:\n nginx-azuredisk:\n Image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine\n Port: <none>\n Host Port: <none>\n Command:\n /bin/sh\n -c\n while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done\n Limits:\n cpu: 250m\n memory: 128Mi\n Requests:\n cpu: 125m\n memory: 64Mi\n Environment: <none>\n Mounts:\n /mnt/azuredisk from lrs-azure-disk (rw)\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-bl5r9 (ro)\nConditions:\n Type Status\n PodScheduled False\nVolumes:\n lrs-azure-disk:\n Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n ClaimName: lrs-pvc-azure-disk\n ReadOnly: false\n kube-api-access-bl5r9:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: <nil>\n DownwardAPI: true\nQoS Class: Burstable\nNode-Selectors: kubernetes.io/os=linux\nTolerations: node.kubernetes.io/memory-pressure:NoSchedule op=Exists\n node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nTopology Spread Constraints: kubernetes.io/hostname:DoNotSchedule when max skew 1 is exceeded for selector app=lrs-nginx\n topology.kubernetes.io/zone:DoNotSchedule when max skew 1 is exceeded for selector app=lrs-nginx\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Warning FailedScheduling 2m25s default-scheduler 0/6 nodes are available: 1 node(s) were unschedulable, 2 node(s) had volume node affinity conflict, 3 node(s) had untolerated taint {CriticalAddonsOnly: true}. preemption: 0/6 nodes are available: 6 Preemption is not helpful for scheduling..\n Normal NotTriggerScaleUp 2m23s cluster-autoscaler pod didn't trigger scale-up: 1 node(s) had untolerated taint {CriticalAddonsOnly: true} \n \n A persistent volume can specify node affinity to define constraints that limit what nodes this volume can be accessed from. Pods that use a persistent volume will only be scheduled to nodes that are selected by the node affinity constraint. To specify node affinity, set  nodeAffinity  in the  .spec  of a persistent volume. The PersistentVolume API reference has more details on this field. You can run the following command to retrieve the definition of the LRS persistent volume in YAML format: \n \n kubectl get pv $(kubectl get pv -o jsonpath='{.items[?(@.spec.claimRef.name==\"lrs-pvc-azure-disk\")].metadata.name}') -o yaml \n \n The output shows that the  nodeAffinity  constraint requires the mounting pod to be in the  westeurope-1  zone. 
\n \n apiVersion: v1\nkind: PersistentVolume\nmetadata:\n annotations:\n pv.kubernetes.io/provisioned-by: disk.csi.azure.com\n volume.kubernetes.io/provisioner-deletion-secret-name: \"\"\n volume.kubernetes.io/provisioner-deletion-secret-namespace: \"\"\n creationTimestamp: \"2024-01-16T13:41:45Z\"\n finalizers:\n - external-provisioner.volume.kubernetes.io/finalizer\n - kubernetes.io/pv-protection\n - external-attacher/disk-csi-azure-com\n name: pvc-96c8f65f-2d4d-4156-b0a2-0d2aa9847e8e\n resourceVersion: \"53017\"\n uid: ce4e9bd4-29e9-49d5-9552-c5a2e133b794\nspec:\n accessModes:\n - ReadWriteOnce\n capacity:\n storage: 10Gi\n claimRef:\n apiVersion: v1\n kind: PersistentVolumeClaim\n name: lrs-pvc-azure-disk\n namespace: disk-test\n resourceVersion: \"52990\"\n uid: 96c8f65f-2d4d-4156-b0a2-0d2aa9847e8e\n csi:\n driver: disk.csi.azure.com\n volumeAttributes:\n csi.storage.k8s.io/pv/name: pvc-96c8f65f-2d4d-4156-b0a2-0d2aa9847e8e\n csi.storage.k8s.io/pvc/name: lrs-pvc-azure-disk\n csi.storage.k8s.io/pvc/namespace: disk-test\n requestedsizegib: \"10\"\n skuname: Premium_LRS\n storage.kubernetes.io/csiProvisionerIdentity: 1705401468994-2380-disk.csi.azure.com\n volumeHandle: /subscriptions/1a45a694-ae23-4650-9774-89a571c462f6/resourceGroups/mc_horusrg_horusaks_westeurope/providers/Microsoft.Compute/disks/pvc-96c8f65f-2d4d-4156-b0a2-0d2aa9847e8e\n nodeAffinity:\n required:\n nodeSelectorTerms:\n - matchExpressions:\n - key: topology.disk.csi.azure.com/zone\n operator: In\n values:\n - westeurope-1\n persistentVolumeReclaimPolicy: Delete\n storageClassName: managed-csi-premium\n volumeMode: Filesystem\nstatus:\n phase: Bound \n \n Instead, the  zrs-nginx-*  pod was rescheduled by the Kubernetes scheduler to a node in a different availability zone. To retrieve the definition of the ZRS persistent volume in YAML format, use the following command: \n \n kubectl get pv $(kubectl get pv -o jsonpath='{.items[?(@.spec.claimRef.name==\"zrs-pvc-azure-disk\")].metadata.name}') -o yaml \n \n In the output, you can see that the  nodeAffinity  constraint allows the mouting pod to run in any zone or be zone-unaware: \n \n apiVersion: v1\nkind: PersistentVolume\nmetadata:\n annotations:\n pv.kubernetes.io/provisioned-by: disk.csi.azure.com\n volume.kubernetes.io/provisioner-deletion-secret-name: \"\"\n volume.kubernetes.io/provisioner-deletion-secret-namespace: \"\"\n creationTimestamp: \"2024-01-16T13:41:45Z\"\n finalizers:\n - external-provisioner.volume.kubernetes.io/finalizer\n - kubernetes.io/pv-protection\n - external-attacher/disk-csi-azure-com\n name: pvc-8d19543a-e725-4b80-b304-2150895e7559\n resourceVersion: \"53035\"\n uid: 59ec6f34-2bcb-493f-96df-8c4d66dff9db\nspec:\n accessModes:\n - ReadWriteOnce\n capacity:\n storage: 10Gi\n claimRef:\n apiVersion: v1\n kind: PersistentVolumeClaim\n name: zrs-pvc-azure-disk\n namespace: disk-test\n resourceVersion: \"52998\"\n uid: 8d19543a-e725-4b80-b304-2150895e7559\n csi:\n driver: disk.csi.azure.com\n volumeAttributes:\n csi.storage.k8s.io/pv/name: pvc-8d19543a-e725-4b80-b304-2150895e7559\n csi.storage.k8s.io/pvc/name: zrs-pvc-azure-disk\n csi.storage.k8s.io/pvc/namespace: disk-test\n requestedsizegib: \"10\"\n skuname: Premium_ZRS\n storage.kubernetes.io/csiProvisionerIdentity: 1705401468994-2380-disk.csi.azure.com\n volumeHandle: /subscriptions/1a45a694-ae23-4650-9774-89a571c462f6/resourceGroups/mc_horusrg_horusaks_westeurope/providers/Microsoft.Compute/disks/pvc-8d19543a-e725-4b80-b304-2150895e7559\n nodeAffinity:\n required:\n nodeSelectorTerms:\n - 
matchExpressions:\n - key: topology.disk.csi.azure.com/zone\n operator: In\n values:\n - westeurope-1\n - matchExpressions:\n - key: topology.disk.csi.azure.com/zone\n operator: In\n values:\n - westeurope-2\n - matchExpressions:\n - key: topology.disk.csi.azure.com/zone\n operator: In\n values:\n - westeurope-3\n - matchExpressions:\n - key: topology.disk.csi.azure.com/zone\n operator: In\n values:\n - \"\"\n persistentVolumeReclaimPolicy: Delete\n storageClassName: managed-csi-premium-zrs\n volumeMode: Filesystem\nstatus:\n phase: Bound \n \n To obtain the JSON definition of the LRS and ZRS Azure Disks in the node resource group of your AKS cluster, you can execute the following script: \n \n #!/bin/bash\n\n# Get all persistent volumes\npvs=$(kubectl get pv -o json -n disk-test)\n\n# Loop over pvs\nfor pv in $(echo \"${pvs}\" | jq -r '.items[].metadata.name'); do\n # Retrieve the resource id of the managed disk from the persistent volume\n echo \"Retrieving the resource id of the managed disk from the [$pv] persistent volume...\"\n diskId=$(kubectl get pv $pv -n disk-test -o jsonpath='{.spec.csi.volumeHandle}')\n\n if [ -n \"$diskId\" ]; then\n diskName=$(basename $diskId)\n echo \"Successfully retrieved the resource id of the [$diskName] managed disk from the [$pv] persistent volume\"\n else\n echo \"Failed to retrieve the resource id of the managed disk from the [$pv] persistent volume\"\n exit 1\n fi\n\n # Retrieve the managed disk from Azure\n echo \"Retrieving the [$diskName] managed disk from Azure...\"\n disk=$(az disk show \\\n --ids $diskId \\\n --output json \\\n --only-show-errors)\n \n if [ -n \"$disk\" ]; then\n echo \"Successfully retrieved the [$diskName] managed disk from Azure\"\n echo \"[$diskName] managed disk details:\"\n echo $disk | jq -r\n else\n echo \"Failed to retrieve the [$diskName] managed disk from Azure\"\n exit 1\n fi\ndone \n \n The following table displays the JSON definition of the Azure Disk used by the LRS persistent volume. Note that this disk utilizes  Premium_LRS  storage and is created in a designated zone,  1  in this case. This explains why a LRS disk can be mounted only by a pod running in a node in the same availability zone. 
{
  "LastOwnershipUpdateTime": "2024-01-16T14:06:25.4114608+00:00",
  "creationData": {
    "createOption": "Empty"
  },
  "diskIOPSReadWrite": 120,
  "diskMBpsReadWrite": 25,
  "diskSizeBytes": 10737418240,
  "diskSizeGB": 10,
  "diskState": "Unattached",
  "encryption": {
    "type": "EncryptionAtRestWithPlatformKey"
  },
  "id": "/subscriptions/1a45a694-ae23-4650-9774-89a571c462f6/resourceGroups/mc_horusrg_horusaks_westeurope/providers/Microsoft.Compute/disks/pvc-96c8f65f-2d4d-4156-b0a2-0d2aa9847e8e",
  "location": "westeurope",
  "name": "pvc-96c8f65f-2d4d-4156-b0a2-0d2aa9847e8e",
  "networkAccessPolicy": "AllowAll",
  "provisioningState": "Succeeded",
  "publicNetworkAccess": "Enabled",
  "resourceGroup": "mc_horusrg_horusaks_westeurope",
  "sku": {
    "name": "Premium_LRS",
    "tier": "Premium"
  },
  "tags": {
    "k8s-azure-created-by": "kubernetes-azure-dd",
    "kubernetes.io-created-for-pv-name": "pvc-96c8f65f-2d4d-4156-b0a2-0d2aa9847e8e",
    "kubernetes.io-created-for-pvc-name": "lrs-pvc-azure-disk",
    "kubernetes.io-created-for-pvc-namespace": "disk-test",
    "supportedBy": "Paolo Salvatori"
  },
  "tier": "P3",
  "timeCreated": "2024-01-16T13:41:42.9555129+00:00",
  "type": "Microsoft.Compute/disks",
  "uniqueId": "19218479-98f0-4880-902f-d8bfdc91423a",
  "zones": [
    "1"
  ]
}

In contrast, the following table contains the JSON definition of the Azure Disk used by the ZRS persistent volume. Note that this disk utilizes Premium_ZRS storage.

{
  "LastOwnershipUpdateTime": "2024-01-16T14:07:08.5986067+00:00",
  "creationData": {
    "createOption": "Empty"
  },
  "diskIOPSReadWrite": 120,
  "diskMBpsReadWrite": 25,
  "diskSizeBytes": 10737418240,
  "diskSizeGB": 10,
  "diskState": "Attached",
  "encryption": {
    "type": "EncryptionAtRestWithPlatformKey"
  },
  "id": "/subscriptions/1a45a694-ae23-4650-9774-89a571c462f6/resourceGroups/mc_horusrg_horusaks_westeurope/providers/Microsoft.Compute/disks/pvc-8d19543a-e725-4b80-b304-2150895e7559",
  "location": "westeurope",
  "managedBy": "/subscriptions/1a45a694-ae23-4650-9774-89a571c462f6/resourceGroups/MC_HorusRG_HorusAks_westeurope/providers/Microsoft.Compute/virtualMachineScaleSets/aks-user-27342081-vmss/virtualMachines/aks-user-27342081-vmss_1",
  "name": "pvc-8d19543a-e725-4b80-b304-2150895e7559",
  "networkAccessPolicy": "AllowAll",
  "provisioningState": "Succeeded",
  "publicNetworkAccess": "Enabled",
  "resourceGroup": "mc_horusrg_horusaks_westeurope",
  "sku": {
    "name": "Premium_ZRS",
    "tier": "Premium"
  },
  "tags": {
    "k8s-azure-created-by": "kubernetes-azure-dd",
    "kubernetes.io-created-for-pv-name": "pvc-8d19543a-e725-4b80-b304-2150895e7559",
    "kubernetes.io-created-for-pvc-name": "zrs-pvc-azure-disk",
    "kubernetes.io-created-for-pvc-namespace": "disk-test",
    "supportedBy": "Paolo Salvatori"
  },
  "tier": "P3",
  "timeCreated": "2024-01-16T13:41:43.5961333+00:00",
  "type": "Microsoft.Compute/disks",
  "uniqueId": "9cd4bfce-8076-4a88-8db0-aa60d685736b"
}

A ZRS managed disk can be attached by a virtual machine in any availability zone. In our sample, a ZRS Azure Disk can be mounted by a pod running on any node in a zone-redundant node pool. ZRS disks are currently not available in all Azure regions.
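If you want to quickly compare the SKU, zone placement, and attach state of the disks created by the two persistent volume claims, you can also query the node resource group directly. A small sketch, assuming the node resource group used in this sample:

az disk list \
  --resource-group mc_horusrg_horusaks_westeurope \
  --query "[].{name:name, sku:sku.name, zones:zones, state:diskState}" \
  --output table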
For more information on ZRS disks, see Zone Redundant Storage (ZRS) option for Azure Disks for high availability. \n You can run the following script to uncordon the nodes. Once the nodes are back in a  Ready  state, the Kubernetes scheduler will be able to run the  lrs-nginx-*  pod on the node in the availability zone that matches the topology criteria of the required persistent volume. \n \n #!/bin/bash\n\n# Get all nodes\nnodes=$(kubectl get nodes -o json)\n\n# Loop over nodes\nfor node in $(echo \"${nodes}\" | jq -r '.items[].metadata.name'); do\n # Check if node is cordoned\n if kubectl get node \"${node}\" | grep -q \"SchedulingDisabled\"; then\n # Uncordon node\n echo \"Uncordoning node ${node}...\"\n kubectl uncordon \"${node}\"\n fi\ndone \n \n AKS cluster with Zonal Node Pools \n The second strategy involves deploying an AKS cluster with three user-mode node pools, each assigned to a different availability zone within the current region. The diagram below illustrates an AKS cluster with a system-mode node pool and three zonal user-mode node pools, each located in a separate availability zone. The cluster is configured to use Azure CNI networking with Dynamic IP allocation and enhanced subnet support. \n   \n Deploy an AKS cluster with Zonal Node Pools using Azure CLI \n The following bash script uses Azure CLI to create the AKS cluster represented in picture above. When creating a cluster using the az aks create command, the  --zones  parameter allows you to specify the availability zones for deploying agent nodes. However, it's important to note that this parameter does not control the deployment of managed control plane components. These components are automatically distributed across all available zones in the region during cluster deployment. \n \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Check if the resource group already exists\necho \"Checking if [\"$resourceGroupName\"] resource group actually exists in the [$subscriptionName] subscription...\"\n\naz group show --name $resourceGroupName --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [\"$resourceGroupName\"] resource group actually exists in the [$subscriptionName] subscription\"\n echo \"Creating [\"$resourceGroupName\"] resource group in the [$subscriptionName] subscription...\"\n\n # create the resource group\n az group create \\\n --name $resourceGroupName \\\n --location $location \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[\"$resourceGroupName\"] resource group successfully created in the [$subscriptionName] subscription\"\n else\n echo \"Failed to create [\"$resourceGroupName\"] resource group in the [$subscriptionName] subscription\"\n exit -1\n fi\nelse\n echo \"[\"$resourceGroupName\"] resource group already exists in the [$subscriptionName] subscription\"\nfi\n\n# Check if log analytics workspace exists and retrieve its resource id\necho \"Retrieving [\"$logAnalyticsName\"] Log Analytics resource id...\"\naz monitor log-analytics workspace show \\\n --name $logAnalyticsName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors &>/dev/null\n\nif [[ $? 
!= 0 ]]; then\n echo \"No [\"$logAnalyticsName\"] log analytics workspace actually exists in the [\"$resourceGroupName\"] resource group\"\n echo \"Creating [\"$logAnalyticsName\"] log analytics workspace in the [\"$resourceGroupName\"] resource group...\"\n\n # Create the log analytics workspace\n az monitor log-analytics workspace create \\\n --name $logAnalyticsName \\\n --resource-group $resourceGroupName \\\n --identity-type SystemAssigned \\\n --sku $logAnalyticsSku \\\n --location $location \\\n --only-show-errors\n\n if [[ $? == 0 ]]; then\n echo \"[\"$logAnalyticsName\"] log analytics workspace successfully created in the [\"$resourceGroupName\"] resource group\"\n else\n echo \"Failed to create [\"$logAnalyticsName\"] log analytics workspace in the [\"$resourceGroupName\"] resource group\"\n exit -1\n fi\nelse\n echo \"[\"$logAnalyticsName\"] log analytics workspace already exists in the [\"$resourceGroupName\"] resource group\"\nfi\n\n# Retrieve the log analytics workspace id\nworkspaceResourceId=$(az monitor log-analytics workspace show \\\n --name $logAnalyticsName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $workspaceResourceId ]]; then\n echo \"Successfully retrieved the resource id for the [\"$logAnalyticsName\"] log analytics workspace\"\nelse\n echo \"Failed to retrieve the resource id for the [\"$logAnalyticsName\"] log analytics workspace\"\n exit -1\nfi\n\n# Check if the client virtual network already exists\necho \"Checking if [$virtualNetworkName] virtual network actually exists in the [$resourceGroupName] resource group...\"\naz network vnet show \\\n --name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$virtualNetworkName] virtual network actually exists in the [$resourceGroupName] resource group\"\n echo \"Creating [$virtualNetworkName] virtual network in the [$resourceGroupName] resource group...\"\n\n # Create the client virtual network\n az network vnet create \\\n --name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --location $location \\\n --address-prefixes $virtualNetworkAddressPrefix \\\n --subnet-name $systemSubnetName \\\n --subnet-prefix $systemSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$virtualNetworkName] virtual network successfully created in the [$resourceGroupName] resource group\"\n else\n echo \"Failed to create [$virtualNetworkName] virtual network in the [$resourceGroupName] resource group\"\n exit -1\n fi\nelse\n echo \"[$virtualNetworkName] virtual network already exists in the [$resourceGroupName] resource group\"\nfi\n\n# Check if the user subnet already exists\necho \"Checking if [$userSubnetName] user subnet actually exists in the [$virtualNetworkName] virtual network...\"\naz network vnet subnet show \\\n --name $userSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? 
!= 0 ]]; then\n echo \"No [$userSubnetName] user subnet actually exists in the [$virtualNetworkName] virtual network\"\n echo \"Creating [$userSubnetName] user subnet in the [$virtualNetworkName] virtual network...\"\n\n # Create the user subnet\n az network vnet subnet create \\\n --name $userSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --address-prefix $userSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$userSubnetName] user subnet successfully created in the [$virtualNetworkName] virtual network\"\n else\n echo \"Failed to create [$userSubnetName] user subnet in the [$virtualNetworkName] virtual network\"\n exit -1\n fi\nelse\n echo \"[$userSubnetName] user subnet already exists in the [$virtualNetworkName] virtual network\"\nfi\n\n# Check if the pod subnet already exists\necho \"Checking if [$podSubnetName] pod subnet actually exists in the [$virtualNetworkName] virtual network...\"\naz network vnet subnet show \\\n --name $podSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$podSubnetName] pod subnet actually exists in the [$virtualNetworkName] virtual network\"\n echo \"Creating [$podSubnetName] pod subnet in the [$virtualNetworkName] virtual network...\"\n\n # Create the pod subnet\n az network vnet subnet create \\\n --name $podSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --address-prefix $podSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$podSubnetName] pod subnet successfully created in the [$virtualNetworkName] virtual network\"\n else\n echo \"Failed to create [$podSubnetName] pod subnet in the [$virtualNetworkName] virtual network\"\n exit -1\n fi\nelse\n echo \"[$podSubnetName] pod subnet already exists in the [$virtualNetworkName] virtual network\"\nfi\n\n# Check if the bastion subnet already exists\necho \"Checking if [$bastionSubnetName] bastion subnet actually exists in the [$virtualNetworkName] virtual network...\"\naz network vnet subnet show \\\n --name $bastionSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --only-show-errors &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$bastionSubnetName] bastion subnet actually exists in the [$virtualNetworkName] virtual network\"\n echo \"Creating [$bastionSubnetName] bastion subnet in the [$virtualNetworkName] virtual network...\"\n\n # Create the bastion subnet\n az network vnet subnet create \\\n --name $bastionSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --address-prefix $bastionSubnetPrefix \\\n --only-show-errors 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$bastionSubnetName] bastion subnet successfully created in the [$virtualNetworkName] virtual network\"\n else\n echo \"Failed to create [$bastionSubnetName] bastion subnet in the [$virtualNetworkName] virtual network\"\n exit -1\n fi\nelse\n echo \"[$bastionSubnetName] bastion subnet already exists in the [$virtualNetworkName] virtual network\"\nfi\n\n# Retrieve the system subnet id\nsystemSubnetId=$(az network vnet subnet show \\\n --name $systemSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $systemSubnetId ]]; then\n echo \"Successfully retrieved the resource id for the [$systemSubnetName] subnet\"\nelse\n echo \"Failed to retrieve the resource id for the [$systemSubnetName] subnet\"\n exit -1\nfi\n\n# Retrieve the user subnet id\nuserSubnetId=$(az network vnet subnet show \\\n --name $userSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $userSubnetId ]]; then\n echo \"Successfully retrieved the resource id for the [$userSubnetName] subnet\"\nelse\n echo \"Failed to retrieve the resource id for the [$userSubnetName] subnet\"\n exit -1\nfi\n\n# Retrieve the pod subnet id\npodSubnetId=$(az network vnet subnet show \\\n --name $podSubnetName \\\n --vnet-name $virtualNetworkName \\\n --resource-group $resourceGroupName \\\n --query id \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\nif [[ -n $podSubnetId ]]; then\n echo \"Successfully retrieved the resource id for the [$podSubnetName] subnet\"\nelse\n echo \"Failed to retrieve the resource id for the [$podSubnetName] subnet\"\n exit -1\nfi\n\n# Get the last Kubernetes version available in the region\nkubernetesVersion=$(az aks get-versions \\\n --location $location \\\n --query \"values[?isPreview==null].version | sort(@) | [-1]\" \\\n --output tsv \\\n --only-show-errors 2>/dev/null)\n\n# Create AKS cluster\necho \"Checking if [\"$aksClusterName\"] aks cluster actually exists in the [\"$resourceGroupName\"] resource group...\"\n\naz aks show --name $aksClusterName --resource-group $resourceGroupName &>/dev/null\n\nif [[ $? 
!= 0 ]]; then
  echo "No ["$aksClusterName"] aks cluster actually exists in the ["$resourceGroupName"] resource group"
  echo "Creating ["$aksClusterName"] aks cluster in the ["$resourceGroupName"] resource group..."

  # Create the aks cluster
  az aks create \
    --name $aksClusterName \
    --resource-group $resourceGroupName \
    --service-cidr $serviceCidr \
    --dns-service-ip $dnsServiceIp \
    --os-sku $osSku \
    --node-osdisk-size $osDiskSize \
    --node-osdisk-type $osDiskType \
    --vnet-subnet-id $systemSubnetId \
    --nodepool-name $systemNodePoolName \
    --pod-subnet-id $podSubnetId \
    --enable-cluster-autoscaler \
    --node-count $nodeCount \
    --min-count $minCount \
    --max-count $maxCount \
    --max-pods $maxPods \
    --location $location \
    --kubernetes-version $kubernetesVersion \
    --ssh-key-value $sshKeyValue \
    --node-vm-size $nodeSize \
    --enable-addons monitoring \
    --workspace-resource-id $workspaceResourceId \
    --network-policy $networkPolicy \
    --network-plugin $networkPlugin \
    --enable-managed-identity \
    --enable-workload-identity \
    --enable-oidc-issuer \
    --enable-aad \
    --enable-azure-rbac \
    --aad-admin-group-object-ids $aadProfileAdminGroupObjectIDs \
    --nodepool-taints CriticalAddonsOnly=true:NoSchedule \
    --nodepool-labels nodePoolMode=system created=AzureCLI osDiskType=ephemeral osType=Linux \
    --nodepool-tags osDiskType=ephemeral osType=Linux \
    --tags created=AzureCLI \
    --cluster-autoscaler-profile balance-similar-node-groups=true \
    --only-show-errors \
    --zones 1 2 3 1>/dev/null

  if [[ $? == 0 ]]; then
    echo "["$aksClusterName"] aks cluster successfully created in the ["$resourceGroupName"] resource group"
  else
    echo "Failed to create ["$aksClusterName"] aks cluster in the ["$resourceGroupName"] resource group"
    exit -1
  fi
else
  echo "["$aksClusterName"] aks cluster already exists in the ["$resourceGroupName"] resource group"
fi

# Iterate from 1 to 3 to create a node pool in each availability zone
for ((i = 1; i <= 3; i++)); do
  userNodePoolName=${userNodePoolPrefix}$(printf "%02d" "$i")

  # Check if the user node pool exists
  echo "Checking if ["$aksClusterName"] aks cluster actually has a user node pool..."
  az aks nodepool show \
    --name $userNodePoolName \
    --cluster-name $aksClusterName \
    --resource-group $resourceGroupName &>/dev/null

  if [[ $? == 0 ]]; then
    echo "A node pool called [$userNodePoolName] already exists in the [$aksClusterName] AKS cluster"
  else
    echo "No node pool called [$userNodePoolName] actually exists in the [$aksClusterName] AKS cluster"
    echo "Creating [$userNodePoolName] node pool in the [$aksClusterName] AKS cluster..."

    az aks nodepool add \
      --name $userNodePoolName \
      --mode $mode \
      --cluster-name $aksClusterName \
      --resource-group $resourceGroupName \
      --enable-cluster-autoscaler \
      --eviction-policy $evictionPolicy \
      --os-type $osType \
      --os-sku $osSku \
      --node-vm-size $vmSize \
      --node-osdisk-size $osDiskSize \
      --node-osdisk-type $osDiskType \
      --node-count $nodeCount \
      --min-count $minCount \
      --max-count $maxCount \
      --max-pods $maxPods \
      --vnet-subnet-id $userSubnetId \
      --pod-subnet-id $podSubnetId \
      --labels nodePoolMode=user created=AzureCLI osDiskType=ephemeral osType=Linux \
      --tags osDiskType=ephemeral osType=Linux \
      --zones $i 1>/dev/null

    if [[ $? == 0 ]]; then
      echo "[$userNodePoolName] node pool successfully created in the [$aksClusterName] AKS cluster"
    else
      echo "Failed to create the [$userNodePoolName] node pool in the [$aksClusterName] AKS cluster"
      exit -1
    fi
  fi
done

# Use the following command to configure kubectl to connect to the new Kubernetes cluster
echo "Getting access credentials to configure kubectl to connect to the ["$aksClusterName"] AKS cluster..."
az aks get-credentials \
  --name $aksClusterName \
  --resource-group $resourceGroupName \
  --overwrite-existing

if [[ $? == 0 ]]; then
  echo "Credentials for the ["$aksClusterName"] cluster successfully retrieved"
else
  echo "Failed to retrieve the credentials for the ["$aksClusterName"] cluster"
  exit -1
fi

As you can see, the script sets the balance-similar-node-groups setting of the cluster autoscaler profile to true. For more information on this flag, see Cluster autoscaler profile settings. When the flag is set to true, the cluster autoscaler automatically identifies node groups with the same instance type and the same set of labels (except for the automatically added zone label) and tries to keep the sizes of those node groups balanced, so that the autoscaler can scale up each node pool as required while keeping their sizes aligned.

This does not guarantee that similar node pools will have exactly the same sizes:

Currently, the balancing is only done at scale-up. The cluster autoscaler will still scale down underutilized nodes regardless of the relative sizes of the underlying node groups. The cluster autoscaler may take balancing into account during scale-down operations in the future.
The cluster autoscaler will only add as many nodes as required to run all existing pods. If the number of nodes is not divisible by the number of balanced node pools, some groups will get one more node than others.
The cluster autoscaler will only balance between node groups that can support the same set of pending pods. If you run pods that can only go to a single node group (for example, due to a nodeSelector on the zone label), the cluster autoscaler will only add nodes to that particular node group.

You can opt a node group out of automatic balancing with other node groups that use the same instance type by giving it any custom label.
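If the cluster already exists, the same autoscaler setting can also be enabled afterwards. A minimal sketch, reusing the variables defined in this article:

az aks update \
  --name $aksClusterName \
  --resource-group $resourceGroupName \
  --cluster-autoscaler-profile balance-similar-node-groups=true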
For more information on zone balancing, see the Cluster Autoscaler FAQ entry "I'm running cluster with nodes in multiple zones for HA purposes. Is that supported by Cluster Autoscaler?"

Before running the script, make sure to define the following variables, which are stored in the separate 00-variables.sh file that the script sources:

# Azure Kubernetes Service (AKS) cluster
prefix="Amon"
aksClusterName="${prefix}Aks"
resourceGroupName="${prefix}RG"
location="WestEurope"
osSku="AzureLinux"
osDiskSize=50
osDiskType="Ephemeral"
systemNodePoolName="system"

# Virtual Network
virtualNetworkName="${prefix}VNet"
virtualNetworkAddressPrefix="10.0.0.0/8"
systemSubnetName="SystemSubnet"
systemSubnetPrefix="10.240.0.0/16"
userSubnetName="UserSubnet"
userSubnetPrefix="10.241.0.0/16"
podSubnetName="PodSubnet"
podSubnetPrefix="10.242.0.0/16"
bastionSubnetName="AzureBastionSubnet"
bastionSubnetPrefix="10.243.2.0/24"

# AKS variables
dnsServiceIp="172.16.0.10"
serviceCidr="172.16.0.0/16"
aadProfileAdminGroupObjectIDs="4e4d0501-e693-4f3e-965b-5bec6c410c03"

# Log Analytics
logAnalyticsName="${prefix}LogAnalytics"
logAnalyticsSku="PerGB2018"

# Node count, node size, and ssh key location for AKS nodes
nodeSize="Standard_D4ds_v4"

sshKeyValue="~/.ssh/id_rsa.pub"

# Network policy
networkPolicy="azure"
networkPlugin="azure"

# Node count variables
nodeCount=1
minCount=3
maxCount=20
maxPods=100

# Node pool variables
userNodePoolPrefix="user"
evictionPolicy="Delete"
vmSize="Standard_D4ds_v4" #Standard_F8s_v2, Standard_D4ads_v5
osType="Linux"
mode="User"

# SubscriptionName and tenantId of the current subscription
subscriptionName=$(az account show --query name --output tsv)
tenantId=$(az account show --query tenantId --output tsv)

# Kubernetes sample
namespace="disk-test"

Deploy a Workload that uses LRS Storage across Zonal Node Pools
If you plan on using the cluster autoscaler with node pools that span multiple zones and leverage scheduling features related to zones, such as topology-aware volume scheduling, we recommend you have one node pool per zone and enable --balance-similar-node-groups through the autoscaler profile. This ensures the autoscaler can scale up the agent nodes separately in each node pool and related availability zone as required and keep the sizes of the node pools balanced. When using the AKS cluster autoscaler with node pools spanning multiple availability zones, there are a few considerations to keep in mind:

It is recommended to create a separate node pool for each zone when using node pools that attach persistent volumes based on locally redundant storage (LRS) Azure Storage using a CSI Driver, such as Azure Disks, Azure Files, or Azure Blob Storage. This is necessary because an LRS persistent volume in one availability zone cannot be attached and accessed by a pod in another availability zone (a zone-pinned storage class sketch follows this list).
If multiple node pools are created within each zone, it is recommended to enable the --balance-similar-node-groups property in the autoscaler profile. This feature helps identify similar node pools and ensures a balanced distribution of nodes across them.
However, if you are not utilizing persistent volumes, the AKS cluster autoscaler should work without any issues with node pools that span multiple availability zones.
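As a complement to the per-zone node pool approach, and purely as a sketch that is not used in the rest of this walkthrough, a custom storage class can pin dynamically provisioned LRS disks to a specific zone by using allowedTopologies; the class name below is hypothetical:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-csi-premium-zone1
provisioner: disk.csi.azure.com
parameters:
  skuname: Premium_LRS
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: topology.disk.csi.azure.com/zone
    values:
    - westeurope-1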
If you plan to deploy workloads to AKS that use the Azure Disks CSI Driver to create and attach Kubernetes persistent volumes based on LRS managed disks, you can use the following strategy:

Create a separate Kubernetes deployment for each zonal node pool.
Use node selectors or node affinity to constrain the Kubernetes scheduler to run the pods of each deployment on the agent nodes of a specific zonal node pool, using the topology labels of the nodes.
Create a separate persistent volume claim for each zonal deployment.
When deploying pods to an AKS cluster that spans multiple availability zones, it is essential to ensure optimal distribution and resilience. To achieve this, you can utilize the Pod Topology Spread Constraints Kubernetes feature. By implementing Pod Topology Spread Constraints, you gain granular control over how pods are spread across your AKS cluster, taking into account failure domains like regions, availability zones, and nodes. In this scenario, you can create constraints that spread pod replicas across different nodes within the intended availability zone.

Test Workload resiliency of an AKS cluster with Zonal Node Pools
In this test, we simulate a scenario where the agent nodes in a specific availability zone suddenly become unavailable due to a failure. The objective is to verify that the application continues to run successfully on the agent nodes in the other availability zones. To prevent interference from the cluster autoscaler during the test and ensure that each zonal node pool consists of exactly two agent nodes, you can execute the following bash script. This script disables the cluster autoscaler on each node pool and manually sets the number of nodes to two for each of them.

#!/bin/bash

# Variables
source ./00-variables.sh
nodeCount=2

# Iterate node pools
for ((i = 1; i <= 3; i++)); do
  userNodePoolName=${userNodePoolPrefix}$(printf "%02d" "$i")

  # Retrieve the node count for the current node pool
  echo "Retrieving the node count for the [$userNodePoolName] node pool..."
  count=$(az aks nodepool show \
    --name $userNodePoolName \
    --cluster-name $aksClusterName \
    --resource-group $resourceGroupName \
    --query count \
    --output tsv \
    --only-show-errors)

  # Disable autoscaling for the current node pool
  echo "Disabling autoscaling for the [$userNodePoolName] node pool..."
  az aks nodepool update \
    --cluster-name $aksClusterName \
    --name $userNodePoolName \
    --resource-group $resourceGroupName \
    --disable-cluster-autoscaler \
    --only-show-errors 1>/dev/null

  # Run this command only if the current node count is not equal to two
  if [[ $count -ne $nodeCount ]]; then
    # Scale the current node pool to two nodes
    echo "Scaling the [$userNodePoolName] node pool to $nodeCount nodes..."
    az aks nodepool scale \
      --cluster-name $aksClusterName \
      --name $userNodePoolName \
      --resource-group $resourceGroupName \
      --node-count $nodeCount \
      --only-show-errors 1>/dev/null
  else
    echo "The [$userNodePoolName] node pool is already scaled to $nodeCount nodes"
  fi
done

You can then use the following script to create these Kubernetes objects:

The disk-test namespace.
Three zne-pvc-azure-disk-0* persistent volume claims, one for each availability zone.
Three zne-nginx-0* deployments, one for each zonal node pool.
\n \n \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Check if namespace exists in the cluster\nresult=$(kubectl get namespace -o jsonpath=\"{.items[?(@.metadata.name=='$namespace')].metadata.name}\")\n\nif [[ -n $result ]]; then\n echo \"$namespace namespace already exists in the cluster\"\nelse\n echo \"$namespace namespace does not exist in the cluster\"\n echo \"creating $namespace namespace in the cluster...\"\n kubectl create namespace $namespace\nfi\n\n# Create the zne-pvc-azure-disk persistent volume claim\nkubectl apply -f zne-pvc.yml -n $namespace\n\n# Create the zne-nginx-01, zne-nginx-02, and zne-nginx-03 deployments\nkubectl apply -f zne-deploy.yml -n $namespace \n \n The following YAML manifest defines three persistent volume claims (PVC), each used by the pod of a separate deployment. \n \n apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: zne-pvc-azure-disk-01\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n storageClassName: managed-csi-premium\n---\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: zne-pvc-azure-disk-02\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n storageClassName: managed-csi-premium\n---\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: zne-pvc-azure-disk-03\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n storageClassName: managed-csi-premium \n \n Each PVC uses the  managed-csi-premium  built-in storage class that leverages  Premium_LRS  storage. As mentioned earlier, LRS persistent volumes can only be attached by pods in the same availability zone. Therefore, it is necessary to create three persistent volume claims, one for each availability zone. \n \n apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n creationTimestamp: \"2024-01-17T15:26:56Z\"\n labels:\n addonmanager.kubernetes.io/mode: EnsureExists\n kubernetes.io/cluster-service: \"true\"\n name: managed-csi-premium\n resourceVersion: \"401\"\n uid: dabacfc8-d8f5-4c8d-ac50-f53338baf31b\nparameters:\n skuname: Premium_LRS\nprovisioner: disk.csi.azure.com\nreclaimPolicy: Delete\nvolumeBindingMode: WaitForFirstConsumer \n \n The following YAML manifest defines three zonal deployments, one for each availability zone. Here are some key points to note: \n   \n \n Each deployment consists of a single replica pod. \n The  zone label  specifies the zone where the deployment is created. \n A  nodeAffinity  constraint is utilized to ensure that each deployment's pods are scheduled in separate availability zones. The  topology.kubernetes.io/zone  label specifies the availability zone for each agent node. \n In scenarios where a deployment has multiple replica pods, the  topologySpreadConstraints  are employed to distribute the pods across multiple nodes within a given availability zone. This is achieved using the  kubernetes.io/hostname  label, which identifies the host name of the agent node. For more details, refer to the Pod Topology Spread Constraints documentation. \n Each deployment uses a distinct persistent volume claim to create and attach a zonal LRS Premium SSD managed disk in the same availability zone as the mounting pod. Each disk is created in the node resource group which contains all of the infrastructure resources associated with the AKS cluster. Each disk has the same name of the corresponding Kubernetes persistent volume. 
\n \n \n apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: zne-nginx-01\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: zne-nginx\n zone: one\n template:\n metadata:\n labels:\n app: zne-nginx\n zone: one\n spec:\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n preference:\n matchExpressions:\n - key: topology.kubernetes.io/zone\n operator: In\n values:\n - westeurope-1\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: zne-nginx\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine\n name: nginx-azuredisk\n resources:\n requests:\n memory: \"64Mi\"\n cpu: \"125m\"\n limits:\n memory: \"128Mi\"\n cpu: \"250m\"\n command:\n - \"/bin/sh\"\n - \"-c\"\n - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done\n volumeMounts:\n - name: zne-azure-disk-01\n mountPath: \"/mnt/azuredisk\"\n readOnly: false\n volumes:\n - name: zne-azure-disk-01\n persistentVolumeClaim:\n claimName: zne-pvc-azure-disk-01\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: zne-nginx-02\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: zne-nginx\n zone: two\n template:\n metadata:\n labels:\n app: zne-nginx\n zone: two\n spec:\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n preference:\n matchExpressions:\n - key: topology.kubernetes.io/zone\n operator: In\n values:\n - westeurope-2\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: zne-nginx\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine\n name: nginx-azuredisk\n resources:\n requests:\n memory: \"64Mi\"\n cpu: \"125m\"\n limits:\n memory: \"128Mi\"\n cpu: \"250m\"\n command:\n - \"/bin/sh\"\n - \"-c\"\n - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done\n volumeMounts:\n - name: zne-azure-disk-02\n mountPath: \"/mnt/azuredisk\"\n readOnly: false\n volumes:\n - name: zne-azure-disk-02\n persistentVolumeClaim:\n claimName: zne-pvc-azure-disk-02\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: zne-nginx-03\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: zne-nginx\n zone: three\n template:\n metadata:\n labels:\n app: zne-nginx\n zone: three\n spec:\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n preference:\n matchExpressions:\n - key: topology.kubernetes.io/zone\n operator: In\n values:\n - westeurope-3\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: zne-nginx\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine\n name: nginx-azuredisk\n resources:\n requests:\n memory: \"64Mi\"\n cpu: \"125m\"\n limits:\n memory: \"128Mi\"\n cpu: \"250m\"\n command:\n - \"/bin/sh\"\n - \"-c\"\n - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done\n volumeMounts:\n - name: zne-azure-disk-03\n mountPath: \"/mnt/azuredisk\"\n readOnly: false\n volumes:\n - name: zne-azure-disk-03\n persistentVolumeClaim:\n claimName: zne-pvc-azure-disk-03 \n \n The diagram below illustrates how the pods are distributed across the agent nodes and zonal 
node pools, along with the corresponding Locally Redundant Storage (LRS) managed disks. \n   \n The pods are distributed evenly across the zonal node pools, each within a separate availability zone, ensuring high availability and fault tolerance. Additionally, each pod is associated with an LRS managed disk that is located in the same availability zone as the pod. This ensures optimal data locality and minimizes network latency for disk operations. Overall, this distribution strategy enhances the resiliency and performance of the system, providing a reliable and efficient deployment architecture. \n Run the following command to retrieve information about the nodes in your Kubernetes cluster, including additional labels related to region and zone topology. \n \n kubectl get nodes -L kubernetes.azure.com/agentpool,topology.kubernetes.io/region,topology.kubernetes.io/zone \n \n The command should return a tabular output like the following that includes information about each node in the cluster, with additional columns for the specified labels  kubernetes.azure.com/agentpool ,  topology.kubernetes.io/region , and  topology.kubernetes.io/zone . \n \n NAME STATUS ROLES AGE VERSION AGENTPOOL REGION ZONE\naks-system-25336594-vmss000000 Ready agent 4d22h v1.28.3 system westeurope westeurope-1\naks-system-25336594-vmss000001 Ready agent 4d22h v1.28.3 system westeurope westeurope-2\naks-system-25336594-vmss000002 Ready agent 4d22h v1.28.3 system westeurope westeurope-3\naks-user01-13513131-vmss000000 Ready agent 4d22h v1.28.3 user01 westeurope westeurope-1\naks-user01-13513131-vmss000001 Ready agent 4d22h v1.28.3 user01 westeurope westeurope-1\naks-user02-14905318-vmss000000 Ready agent 4d22h v1.28.3 user02 westeurope westeurope-2\naks-user02-14905318-vmss000001 Ready agent 4d22h v1.28.3 user02 westeurope westeurope-2\naks-user03-34408806-vmss000000 Ready agent 4d22h v1.28.3 user03 westeurope westeurope-3\naks-user03-34408806-vmss000001 Ready agent 4d22h v1.28.3 user03 westeurope westeurope-3 \n \n You can note that the agent nodes of the three zonal node pools  user01 ,  user02 , and  user03  are located in different availability zones. Now run the following  kubectl  command that returns information about the pods in the  disk-test  namespace. \n \n kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,IP:.status.podIP,HOSTIP:.status.hostIP,NODE:.spec.nodeName -n disk-test \n \n This command provides information on the pods' names and private IP addresses, as well as the hosting node's name and private IP address. Each pod is assigned to a different agent node, node pool, and availability zone to ensure optimal resiliency within the region. \n \n NAME STATUS IP HOSTIP NODE\nzne-nginx-01-5f8d87566-t68rz Running 10.242.0.70 10.241.0.4 aks-user01-13513131-vmss000000\nzne-nginx-02-7fb7769948-4t8z6 Running 10.242.0.117 10.241.0.8 aks-user02-14905318-vmss000000\nzne-nginx-03-7bb589bd98-97xfl Running 10.242.0.183 10.241.0.10 aks-user03-34408806-vmss000000 \n \n Let's observe the behavior when simulating a failure of one of the availability zones. Since the cluster consists of three zonal node pools, each composed of two nodes, we can simulate an availability zone failure by cordoning and draining the nodes of a single node pool. You can run the following script to cordon and drain the nodes of the  user01  node pool which nodes are located in  westeurope-1  zone. 
\n \n #!/bin/bash\n\n# Retrieve the nodes in the user01 agent pool\necho \"Retrieving the nodes in the user01 node pool...\"\nresult=$(kubectl get nodes -l kubernetes.azure.com/agentpool=user01 -o jsonpath='{.items[*].metadata.name}')\n\n# Convert the string of node names into an array\nnodeNames=($result)\n\nfor nodeName in ${nodeNames[@]}; do\n # Cordon the node running the pod\n echo \"Cordoning the [$nodeName] node...\"\n kubectl cordon $nodeName\n\n # Drain the node running the pod\n echo \"Draining the [$nodeName] node...\"\n kubectl drain $nodeName --ignore-daemonsets --delete-emptydir-data --force\ndone \n \n The script execution will produce an output similar to the following. \n \n Retrieving the nodes in the user01 node pool...\nCordoning the [aks-user01-13513131-vmss000000] node...\nnode/aks-user01-13513131-vmss000000 cordoned\nDraining the [aks-user01-13513131-vmss000000] node...\nnode/aks-user01-13513131-vmss000000 already cordoned\nWarning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-lk62h, kube-system/azure-cns-xb4xj, kube-system/azure-npm-mgrzg, kube-system/cloud-node-manager-4bn54, kube-system/csi-azuredisk-node-f648w, kube-system/csi-azurefile-node-fx2wq, kube-system/kube-proxy-cq5p6, kube-system/microsoft-defender-collector-ds-t9df4, kube-system/microsoft-defender-publisher-ds-9gwf2\nnode/aks-user01-13513131-vmss000000 drained\nCordoning the [aks-user01-13513131-vmss000001] node...\nnode/aks-user01-13513131-vmss000001 cordoned\nDraining the [aks-user01-13513131-vmss000001] node...\nnode/aks-user01-13513131-vmss000001 already cordoned\nWarning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-q78sq, kube-system/azure-cns-2sbcb, kube-system/azure-npm-qdxcz, kube-system/cloud-node-manager-6bvdq, kube-system/csi-azuredisk-node-w8s2j, kube-system/csi-azurefile-node-54lfj, kube-system/kube-proxy-g8d2t, kube-system/microsoft-defender-collector-ds-xngfl, kube-system/microsoft-defender-publisher-ds-pzdv6\nevicting pod disk-test/zne-nginx-01-58d48f5894-44vpg\npod/zne-nginx-01-58d48f5894-44vpg evicted\nnode/aks-user01-13513131-vmss000001 drained \n \n Run the following command to retrieve information about the nodes in your Kubernetes cluster, including additional labels related to region and zone topology. \n \n kubectl get nodes -L kubernetes.azure.com/agentpool,topology.kubernetes.io/region,topology.kubernetes.io/zone \n \n The command should return a tabular output like the following. \n \n NAME STATUS ROLES AGE VERSION AGENTPOOL REGION ZONE\naks-system-25336594-vmss000000 Ready agent 4d23h v1.28.3 system westeurope westeurope-1\naks-system-25336594-vmss000001 Ready agent 4d23h v1.28.3 system westeurope westeurope-2\naks-system-25336594-vmss000002 Ready agent 4d23h v1.28.3 system westeurope westeurope-3\naks-user01-13513131-vmss000000 Ready,SchedulingDisabled agent 4d23h v1.28.3 user01 westeurope westeurope-1\naks-user01-13513131-vmss000001 Ready,SchedulingDisabled agent 4d23h v1.28.3 user01 westeurope westeurope-1\naks-user02-14905318-vmss000000 Ready agent 4d22h v1.28.3 user02 westeurope westeurope-2\naks-user02-14905318-vmss000001 Ready agent 4d22h v1.28.3 user02 westeurope westeurope-2\naks-user03-34408806-vmss000000 Ready agent 4d22h v1.28.3 user03 westeurope westeurope-3\naks-user03-34408806-vmss000001 Ready agent 4d22h v1.28.3 user03 westeurope westeurope-3 \n \n From the output, you can observe that the nodes of the  user01  agent pool are now in a  SchedulingDisabled  status. 
This indicates that the Kubernetes scheduler is unable to schedule new pods onto these nodes. However, the agent nodes of the user02 and user03 node pools, in the westeurope-2 and westeurope-3 zones, are still in a Ready status. Now run the following kubectl command, which returns information about the pods in the disk-test namespace.

kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,IP:.status.podIP,HOSTIP:.status.hostIP,NODE:.spec.nodeName -n disk-test

The command returns an output like the following:

NAME STATUS IP HOSTIP NODE
zne-nginx-01-58d48f5894-ffll8 Pending <none> <none> <none>
zne-nginx-02-75949cbfbb-xbt49 Running 10.242.0.118 10.241.0.8 aks-user02-14905318-vmss000000
zne-nginx-03-5c8cbb657d-m5scc Running 10.242.0.199 10.241.0.12 aks-user03-34408806-vmss000001

The zne-nginx-01-* pod is now in a Pending status because the Kubernetes scheduler cannot find a node on which to run it: the pod needs to mount a volume located in the westeurope-1 zone, but no schedulable nodes are available in that zone. Strictly speaking, there is still a node in the westeurope-1 zone, but it belongs to the system node pool, which is tainted with CriticalAddonsOnly=true:NoSchedule, and the pod doesn't have the necessary toleration to match this taint. On the other hand, the zne-nginx-02-* and zne-nginx-03-* pods are still in a Running state, as their agent nodes were not affected by the simulated availability zone failure.

The following diagram shows what happened to the pods after their hosting nodes were cordoned and drained.

You can run the following script to uncordon the nodes of the user01 node pool. Once these nodes are back in a Ready state, the Kubernetes scheduler will be able to run the zne-nginx-01-* pod on one of them.

#!/bin/bash

# Get all nodes
nodes=$(kubectl get nodes -o json)

# Loop over nodes
for node in $(echo "${nodes}" | jq -r '.items[].metadata.name'); do
  # Check if node is cordoned
  if kubectl get node "${node}" | grep -q "SchedulingDisabled"; then
    # Uncordon node
    echo "Uncordoning node ${node}..."
    kubectl uncordon "${node}"
  fi
done

We will now simulate a node failure for each of the agent nodes that are running one of the zne-nginx-* pods. Each zonal node pool contains two nodes, so the Kubernetes scheduler should be able to reschedule each pod on another node within the same zonal node pool and availability zone.
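Before starting the next simulation, you can optionally verify that all three pods are back in a Running state. The following is a minimal check, assuming the three deployments share the app=zne-nginx label used by the cordon script in the next step:

# Wait until every zne-nginx pod reports Ready, then show where the pods are scheduled
kubectl wait --for=condition=Ready pod -l app=zne-nginx -n disk-test --timeout=300s
kubectl get pod -l app=zne-nginx -n disk-test -o wide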
Run the following script to cordon and drain the agent nodes running the pods:

#!/bin/bash

# Variables
source ./00-variables.sh

# Retrieve the names of the pods with the 'app=zne-nginx' label
echo "Retrieving the names of the pods with the 'app=zne-nginx' label..."
result=$(kubectl get pods -l app=zne-nginx -n $namespace -o jsonpath='{.items[*].metadata.name}')

# Convert the string of pod names into an array
podNames=($result)

for podName in ${podNames[@]}; do

  # Retrieve the name of the node running the pod
  nodeName=$(kubectl get pods $podName -n $namespace -o jsonpath='{.spec.nodeName}')

  if [ -n "$nodeName" ]; then
    echo "The [$podName] pod runs on the [$nodeName] agent node"
  else
    echo "Failed to retrieve the name of the node running the [$podName] pod"
    exit 1
  fi

  # Retrieve the availability zone of the node running the pod
  agentPoolZone=$(kubectl get nodes $nodeName -o jsonpath='{.metadata.labels.topology\.kubernetes\.io/zone}')

  if [ -n "$agentPoolZone" ]; then
    echo "The [$nodeName] agent node is in the [$agentPoolZone] availability zone"
  else
    echo "Failed to retrieve the availability zone of the [$nodeName] agent node"
    exit 1
  fi

  # Retrieve the name of the agent pool for the node running the pod
  agentPoolName=$(kubectl get nodes $nodeName -o jsonpath='{.metadata.labels.agentpool}')

  if [ -n "$agentPoolName" ]; then
    echo "The [$nodeName] agent node belongs to the [$agentPoolName] agent pool"
  else
    echo "Failed to retrieve the name of the agent pool for the [$nodeName] agent node"
    exit 1
  fi

  # Cordon the node running the pod
  echo "Cordoning the [$nodeName] node..."
  kubectl cordon $nodeName

  # Drain the node running the pod
  echo "Draining the [$nodeName] node..."
  kubectl drain $nodeName --ignore-daemonsets --delete-emptydir-data --force
done

The script execution will produce an output similar to the following.
Retrieving the names of the pods with the 'app=zne-nginx' label...
The [zne-nginx-01-5f8d87566-t68rz] pod runs on the [aks-user01-13513131-vmss000000] agent node
The [aks-user01-13513131-vmss000000] agent node is in the [westeurope-1] availability zone
The [aks-user01-13513131-vmss000000] agent node belongs to the [user01] agent pool
Cordoning the [aks-user01-13513131-vmss000000] node...
node/aks-user01-13513131-vmss000000 cordoned
Draining the [aks-user01-13513131-vmss000000] node...
node/aks-user01-13513131-vmss000000 already cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-fz794, kube-system/azure-cns-b9qnz, kube-system/azure-npm-mgrzg, kube-system/cloud-node-manager-4bn54, kube-system/csi-azuredisk-node-f648w, kube-system/csi-azurefile-node-fx2wq, kube-system/kube-proxy-cq5p6, kube-system/microsoft-defender-collector-ds-t9df4, kube-system/microsoft-defender-publisher-ds-9gwf2
evicting pod disk-test/zne-nginx-01-5f8d87566-t68rz
pod/zne-nginx-01-5f8d87566-t68rz evicted
node/aks-user01-13513131-vmss000000 drained
The [zne-nginx-02-7fb7769948-4t8z6] pod runs on the [aks-user02-14905318-vmss000000] agent node
The [aks-user02-14905318-vmss000000] agent node is in the [westeurope-2] availability zone
The [aks-user02-14905318-vmss000000] agent node belongs to the [user02] agent pool
Cordoning the [aks-user02-14905318-vmss000000] node...
node/aks-user02-14905318-vmss000000 cordoned
Draining the [aks-user02-14905318-vmss000000] node...
node/aks-user02-14905318-vmss000000 already cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-8wrgd, kube-system/azure-cns-8fqft, kube-system/azure-npm-49mh5, kube-system/cloud-node-manager-c4nk8, kube-system/csi-azuredisk-node-nvt6q, kube-system/csi-azurefile-node-v7x87, kube-system/kube-proxy-tnjft, kube-system/microsoft-defender-collector-ds-2px57, kube-system/microsoft-defender-publisher-ds-q5k7n
evicting pod disk-test/zne-nginx-02-7fb7769948-4t8z6
pod/zne-nginx-02-7fb7769948-4t8z6 evicted
node/aks-user02-14905318-vmss000000 drained
The [zne-nginx-03-7bb589bd98-97xfl] pod runs on the [aks-user03-34408806-vmss000000] agent node
The [aks-user03-34408806-vmss000000] agent node is in the [westeurope-3] availability zone
The [aks-user03-34408806-vmss000000] agent node belongs to the [user03] agent pool
Cordoning the [aks-user03-34408806-vmss000000] node...
node/aks-user03-34408806-vmss000000 cordoned
Draining the [aks-user03-34408806-vmss000000] node...
node/aks-user03-34408806-vmss000000 already cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-rvvqj, kube-system/azure-cns-88sqt, kube-system/azure-npm-xqs9r, kube-system/cloud-node-manager-qs94f, kube-system/csi-azuredisk-node-t6ps2, kube-system/csi-azurefile-node-xnswh, kube-system/kube-proxy-5vvgd, kube-system/microsoft-defender-collector-ds-24pql, kube-system/microsoft-defender-publisher-ds-lnf4b
evicting pod disk-test/zne-nginx-03-7bb589bd98-97xfl
pod/zne-nginx-03-7bb589bd98-97xfl evicted
node/aks-user03-34408806-vmss000000 drained

Run the following command to retrieve information about the nodes in your Kubernetes cluster, including additional labels related to region and zone topology.

kubectl get nodes -L kubernetes.azure.com/agentpool,topology.kubernetes.io/region,topology.kubernetes.io/zone

The command should return a tabular output like the following.
NAME STATUS ROLES AGE VERSION AGENTPOOL REGION ZONE
aks-system-25336594-vmss000000 Ready agent 17h v1.28.3 system westeurope westeurope-1
aks-system-25336594-vmss000001 Ready agent 17h v1.28.3 system westeurope westeurope-2
aks-system-25336594-vmss000002 Ready agent 17h v1.28.3 system westeurope westeurope-3
aks-user01-13513131-vmss000000 Ready,SchedulingDisabled agent 17h v1.28.3 user01 westeurope westeurope-1
aks-user01-13513131-vmss000001 Ready agent 17h v1.28.3 user01 westeurope westeurope-1
aks-user02-14905318-vmss000000 Ready,SchedulingDisabled agent 17h v1.28.3 user02 westeurope westeurope-2
aks-user02-14905318-vmss000001 Ready agent 17h v1.28.3 user02 westeurope westeurope-2
aks-user03-34408806-vmss000000 Ready,SchedulingDisabled agent 17h v1.28.3 user03 westeurope westeurope-3
aks-user03-34408806-vmss000001 Ready agent 17h v1.28.3 user03 westeurope westeurope-3

From the output, you can see that the nodes which were previously running the zne-nginx-01-*, zne-nginx-02-*, and zne-nginx-03-* pods are now in a SchedulingDisabled status. This means that the Kubernetes scheduler cannot schedule new pods on these nodes. However, the user01, user02, and user03 node pools each have an additional agent node in a Ready status, hence capable of running pods. Now run the following kubectl command, which returns information about the pods in the disk-test namespace.

kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,IP:.status.podIP,HOSTIP:.status.hostIP,NODE:.spec.nodeName -n disk-test

The command returns an output like the following:

NAME STATUS IP HOSTIP NODE
zne-nginx-01-5f8d87566-ntb49 Running 10.242.0.76 10.241.0.5 aks-user01-13513131-vmss000001
zne-nginx-02-7fb7769948-9ftfj Running 10.242.0.149 10.241.0.7 aks-user02-14905318-vmss000001
zne-nginx-03-7bb589bd98-gqwhx Running 10.242.0.207 10.241.0.12 aks-user03-34408806-vmss000001

As you can observe, all the pods are in a Running status. As shown in the following diagram, the Kubernetes scheduler was able to move each pod to another node in the same zonal node pool and availability zone.

You can run the following script to uncordon the nodes.

#!/bin/bash

# Get all nodes
nodes=$(kubectl get nodes -o json)

# Loop over nodes
for node in $(echo "${nodes}" | jq -r '.items[].metadata.name'); do
  # Check if node is cordoned
  if kubectl get node "${node}" | grep -q "SchedulingDisabled"; then
    # Uncordon node
    echo "Uncordoning node ${node}..."
    kubectl uncordon "${node}"
  fi
done

As a final test, let's create a workload that makes use of ZRS storage. In this test, we will create a deployment that consists of a single pod replica that can run on any agent node, in any zonal node pool and availability zone. The objective is to observe the behavior of the pod when we simulate a failure of the availability zone that hosts its agent node. To set up the necessary Kubernetes objects, you can use the provided script to create the following:

- The disk-test namespace.
- The managed-csi-premium-zrs storage class (see the sketch after this list).
- The zrs-pvc-azure-disk persistent volume claim (PVC).
- The zrs-nginx deployment.
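The setup script shown next applies a managed-csi-premium-zrs.yml manifest that is not reproduced in this article. A minimal sketch of what a Premium SSD ZRS storage class for the Azure disk CSI driver might look like is shown below; the exact file contents are an assumption:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-csi-premium-zrs
provisioner: disk.csi.azure.com
parameters:
  skuName: Premium_ZRS   # zone-redundant Premium SSD managed disks
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true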
#!/bin/bash

# Variables
source ./00-variables.sh

# Check if namespace exists in the cluster
result=$(kubectl get namespace -o jsonpath="{.items[?(@.metadata.name=='$namespace')].metadata.name}")

if [[ -n $result ]]; then
  echo "$namespace namespace already exists in the cluster"
else
  echo "$namespace namespace does not exist in the cluster"
  echo "creating $namespace namespace in the cluster..."
  kubectl create namespace $namespace
fi

# Create the managed-csi-premium-zrs storage class
kubectl apply -f managed-csi-premium-zrs.yml

# Create the zrs-pvc-azure-disk persistent volume claim
kubectl apply -f zrs-pvc.yml -n $namespace

# Create the zrs-nginx deployment
kubectl apply -f zrs-deploy.yml -n $namespace

The following YAML manifest defines the zrs-pvc-azure-disk persistent volume claim. This PVC uses the managed-csi-premium-zrs storage class, which provisions Premium_ZRS storage.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zrs-pvc-azure-disk
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: managed-csi-premium-zrs

The following YAML manifest defines the zrs-nginx deployment. Here are some important observations:

- The deployment consists of a single pod replica.
- Pod topology spread constraints are configured to distribute pod replicas across availability zones and across different nodes within a single availability zone.
- The deployment uses the zrs-pvc-azure-disk persistent volume claim to create and attach a ZRS Premium SSD managed disk, which is replicated across three availability zones. The Azure disk is created in the node resource group, which contains all the infrastructure resources associated with the AKS cluster. The managed disk has the same name as the corresponding Kubernetes persistent volume.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: zrs-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zrs-nginx
  template:
    metadata:
      labels:
        app: zrs-nginx
    spec:
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: zrs-nginx
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: zrs-nginx
      nodeSelector:
        "kubernetes.io/os": linux
      containers:
      - image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine
        name: nginx-azuredisk
        resources:
          requests:
            memory: "64Mi"
            cpu: "125m"
          limits:
            memory: "128Mi"
            cpu: "250m"
        command:
        - "/bin/sh"
        - "-c"
        - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done
        volumeMounts:
        - name: zrs-azure-disk
          mountPath: "/mnt/azuredisk"
          readOnly: false
      volumes:
      - name: zrs-azure-disk
        persistentVolumeClaim:
          claimName: zrs-pvc-azure-disk

Please note that the system-mode node pool is tainted with CriticalAddonsOnly=true:NoSchedule. This taint prevents pods without the corresponding toleration from running on the agent nodes of this node pool. Our test deployments do not include this toleration, so when we create the deployment, the Kubernetes scheduler places the pod on an agent node of one of the three zonal node pools, which don't have any taint.

The diagram below illustrates the pod and its Zone-Redundant Storage (ZRS) managed disk in the cluster.
As observed in the previous section, zone-redundant storage (ZRS) replicates Azure Disk data synchronously across three Azure availability zones within the same region. With ZRS, your data remains accessible for both read and write operations even if one zone becomes unavailable. However, during zone unavailability, Azure may perform networking updates, such as DNS repointing, which could temporarily impact your application. To design applications for ZRS, it is advised to follow best practices for handling transient faults, including implementing retry policies with exponential back-off.

Run the following command to retrieve information about the nodes in your Kubernetes cluster, including additional labels related to region and zone topology.

kubectl get nodes -L kubernetes.azure.com/agentpool,topology.kubernetes.io/region,topology.kubernetes.io/zone

The command should return a tabular output like the following, which includes information about each node in the cluster, with additional columns for the specified labels kubernetes.azure.com/agentpool, topology.kubernetes.io/region, and topology.kubernetes.io/zone.

NAME STATUS ROLES AGE VERSION AGENTPOOL REGION ZONE
aks-system-25336594-vmss000000 Ready agent 5d22h v1.28.3 system westeurope westeurope-1
aks-system-25336594-vmss000001 Ready agent 5d22h v1.28.3 system westeurope westeurope-2
aks-system-25336594-vmss000002 Ready agent 5d22h v1.28.3 system westeurope westeurope-3
aks-user01-13513131-vmss000000 Ready agent 5d22h v1.28.3 user01 westeurope westeurope-1
aks-user01-13513131-vmss000001 Ready agent 5d22h v1.28.3 user01 westeurope westeurope-1
aks-user02-14905318-vmss000000 Ready agent 5d22h v1.28.3 user02 westeurope westeurope-2
aks-user02-14905318-vmss000001 Ready agent 5d22h v1.28.3 user02 westeurope westeurope-2
aks-user03-34408806-vmss000000 Ready agent 5d22h v1.28.3 user03 westeurope westeurope-3
aks-user03-34408806-vmss000001 Ready agent 5d22h v1.28.3 user03 westeurope westeurope-3

You can note that the agent nodes of the three zonal node pools user01, user02, and user03 are located in different availability zones. Now run the following kubectl command, which returns information about the pod in the disk-test namespace.

kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,IP:.status.podIP,HOSTIP:.status.hostIP,NODE:.spec.nodeName -n disk-test

This command provides information on the pod's name and private IP address, as well as the hosting node's name and private IP address. In this case, the pod is hosted on the first node of the user03 node pool in the westeurope-3 availability zone.

NAME STATUS IP HOSTIP NODE
zrs-nginx-b86595984-xf2pg Running 10.242.0.187 10.241.0.10 aks-user03-34408806-vmss000000

Let's observe the behavior when simulating a failure of all the nodes of the user03 node pool in the westeurope-3 availability zone. Since the cluster consists of three zonal node pools, each composed of two nodes, we can simulate an availability zone failure by cordoning and draining the nodes of the user03 node pool.
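Before draining the nodes, you can optionally confirm which managed disk backs the claim. The following is a minimal sketch, assuming the zrs-pvc-azure-disk claim is already bound and was provisioned by the Azure disk CSI driver:

# Print the persistent volume bound to the claim and the Azure resource ID
# of the underlying managed disk (the CSI volume handle)
pvName=$(kubectl get pvc zrs-pvc-azure-disk -n disk-test -o jsonpath='{.spec.volumeName}')
kubectl get pv $pvName -o jsonpath='{.spec.csi.volumeHandle}{"\n"}'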
You can run the following script to cordon and drain the nodes of the user03 node pool:

#!/bin/bash

# Retrieve the nodes in the user03 agent pool
echo "Retrieving the nodes in the user03 node pool..."
result=$(kubectl get nodes -l kubernetes.azure.com/agentpool=user03 -o jsonpath='{.items[*].metadata.name}')

# Convert the string of node names into an array
nodeNames=($result)

for nodeName in ${nodeNames[@]}; do
  # Cordon the node running the pod
  echo "Cordoning the [$nodeName] node..."
  kubectl cordon $nodeName

  # Drain the node running the pod
  echo "Draining the [$nodeName] node..."
  kubectl drain $nodeName --ignore-daemonsets --delete-emptydir-data --force
done

The script execution will produce an output similar to the following.

Retrieving the nodes in the user03 node pool...
Cordoning the [aks-user03-34408806-vmss000000] node...
node/aks-user03-34408806-vmss000000 cordoned
Draining the [aks-user03-34408806-vmss000000] node...
node/aks-user03-34408806-vmss000000 already cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-pvzrt, kube-system/azure-cns-j7477, kube-system/azure-npm-xqs9r, kube-system/cloud-node-manager-qs94f, kube-system/csi-azuredisk-node-t6ps2, kube-system/csi-azurefile-node-xnswh, kube-system/kube-proxy-5vvgd, kube-system/microsoft-defender-collector-ds-24pql, kube-system/microsoft-defender-publisher-ds-lnf4b
evicting pod disk-test/zrs-nginx-b86595984-xf2pg
pod/zrs-nginx-b86595984-xf2pg evicted
node/aks-user03-34408806-vmss000000 drained
Cordoning the [aks-user03-34408806-vmss000001] node...
node/aks-user03-34408806-vmss000001 cordoned
Draining the [aks-user03-34408806-vmss000001] node...
node/aks-user03-34408806-vmss000001 already cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/ama-logs-qdscd, kube-system/azure-cns-tpvj9, kube-system/azure-npm-st58w, kube-system/cloud-node-manager-tmw64, kube-system/csi-azuredisk-node-qwlws, kube-system/csi-azurefile-node-fz9xn, kube-system/kube-proxy-wg48x, kube-system/microsoft-defender-collector-ds-s4mcw, kube-system/microsoft-defender-publisher-ds-6q6z9
evicting pod disk-test/zrs-nginx-b86595984-wfrqd
pod/zrs-nginx-b86595984-wfrqd evicted
node/aks-user03-34408806-vmss000001 drained

Run the following command to retrieve information about the nodes in your Kubernetes cluster, including additional labels related to region and zone topology.
kubectl get nodes -L kubernetes.azure.com/agentpool,topology.kubernetes.io/region,topology.kubernetes.io/zone

The command should return a tabular output like the following:

NAME STATUS ROLES AGE VERSION AGENTPOOL REGION ZONE
aks-system-25336594-vmss000000 Ready agent 5d22h v1.28.3 system westeurope westeurope-1
aks-system-25336594-vmss000001 Ready agent 5d22h v1.28.3 system westeurope westeurope-2
aks-system-25336594-vmss000002 Ready agent 5d22h v1.28.3 system westeurope westeurope-3
aks-user01-13513131-vmss000000 Ready agent 5d22h v1.28.3 user01 westeurope westeurope-1
aks-user01-13513131-vmss000001 Ready agent 5d22h v1.28.3 user01 westeurope westeurope-1
aks-user02-14905318-vmss000000 Ready agent 5d22h v1.28.3 user02 westeurope westeurope-2
aks-user02-14905318-vmss000001 Ready agent 5d22h v1.28.3 user02 westeurope westeurope-2
aks-user03-34408806-vmss000000 Ready,SchedulingDisabled agent 5d22h v1.28.3 user03 westeurope westeurope-3
aks-user03-34408806-vmss000001 Ready,SchedulingDisabled agent 5d22h v1.28.3 user03 westeurope westeurope-3

From the output, you can observe that the nodes of the user03 agent pool are now in a SchedulingDisabled status. This indicates that the Kubernetes scheduler is unable to schedule new pods onto these nodes. However, the agent nodes of the user01 and user02 node pools, in the westeurope-1 and westeurope-2 zones, are still in a Ready status. Now run the following kubectl command, which returns information about the pod in the disk-test namespace.

kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,IP:.status.podIP,HOSTIP:.status.hostIP,NODE:.spec.nodeName -n disk-test

The command returns an output like the following:

NAME STATUS IP HOSTIP NODE
zrs-nginx-b86595984-tgcbm Running 10.242.0.145 10.241.0.7 aks-user02-14905318-vmss000001

The zrs-nginx-* pod was successfully rescheduled on another agent node, in a different node pool and availability zone, because its ZRS managed disk can be attached from any zone in the region. The following diagram shows what happened to the pod after its hosting nodes were cordoned and drained.

You can run the following script to uncordon the nodes.

#!/bin/bash

# Get all nodes
nodes=$(kubectl get nodes -o json)

# Loop over nodes
for node in $(echo "${nodes}" | jq -r '.items[].metadata.name'); do
  # Check if node is cordoned
  if kubectl get node "${node}" | grep -q "SchedulingDisabled"; then
    # Uncordon node
    echo "Uncordoning node ${node}..."
    kubectl uncordon "${node}"
  fi
done

Conclusions

This article discussed two approaches for creating a zone-redundant AKS cluster:

- Zone-redundant node pool: this approach consists in creating a zone-redundant node pool, where nodes are distributed across multiple availability zones. This ensures that the node pool can handle failures in any zone while maintaining the desired functionality.
  - Pros: you can use a single deployment and pod topology spread constraints to distribute the pod replicas across the availability zones within a region.
  - Cons: you need to use zone-redundant storage (ZRS) to guarantee that Azure disks mounted as persistent volumes can be accessed from any availability zone. ZRS provides better intra-region resiliency than locally redundant storage (LRS), but it's more costly.
- AKS cluster with three node pools: this approach involves creating an AKS cluster with three node pools, each assigned to a different availability zone. This ensures redundancy across zones in the cluster.
  - Pros: you can use locally redundant storage (LRS) when creating and mounting Azure disks, which is less expensive than zone-redundant storage (ZRS) Azure disks.
  - Cons: you need to create and scale multiple separate deployments, one for each availability zone, for the same workload (a minimal sketch of such a per-zone deployment follows this list).
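For reference, a per-zone deployment can be pinned to a single availability zone with a node affinity on the topology.kubernetes.io/zone label. The following is a minimal sketch under that assumption; the names, zone value, and image are illustrative, and the actual per-zone manifests used earlier in this article may differ:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: zne-nginx-01
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zne-nginx
  template:
    metadata:
      labels:
        app: zne-nginx
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              # Pin the replica to a single availability zone (illustrative value)
              - key: topology.kubernetes.io/zone
                operator: In
                values:
                - westeurope-1
      containers:
      - name: nginx
        image: mcr.microsoft.com/oss/nginx/nginx:1.17.3-alpine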
","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NTA5MmlFMTI3NzI3NDBDOURGODNF?revision=7\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuM3wyLjF8b3wyNXxfTlZffDEz","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MDM2MjU0LTU0NTA5NmkwNDNGNUNBQkIwRjM3RUJF?revision=7\"}"}}],"totalCount":13,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"videos":{"__typename":"VideoConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"coverImage":null,"coverImageProperties":{"__typename":"CoverImageProperties","style":"STANDARD","titlePosition":"BOTTOM","altText":""}},"Conversation:conversation:4357383":{"__typename":"Conversation","id":"conversation:4357383","topic":{"__typename":"BlogTopicMessage","uid":4357383},"lastPostingActivityTime":"2024-12-13T12:21:12.108-08:00","solved":false},"User:user:2052111":{"__typename":"User","uid":2052111,"login":"JillArmourMicrosoft","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/dS0yMDUyMTExLTUyMzQ4M2k0NzRFNjIxNUE3NkI4NzIw"},"id":"user:2052111"},"BlogTopicMessage:message:4357383":{"__typename":"BlogTopicMessage","subject":"FastTrack for Azure (FTA) program retiring December 2024","conversation":{"__ref":"Conversation:conversation:4357383"},"id":"message:4357383","revisionNum":1,"uid":4357383,"depth":0,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"author":{"__ref":"User:user:2052111"},"teaser@stripHtml({\"removeProcessingText\":true,\"truncateLength\":-1})":"","introduction":"","metrics":{"__typename":"MessageMetrics","views":572},"postTime":"2024-12-13T12:21:12.108-08:00","lastPublishTime":"2024-12-13T12:21:12.108-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" ATTENTION: \n As of December 31st, 2024, the FastTrack for Azure (FTA) program will be retired.  FTA will support any projects currently in motion to ensure successful completion by December 31st, 2024, but will no longer accept new nominations. 
For more information on available programs and resources, visit: Azure Migrate, Modernize, and Innovate | Microsoft Azure

Creating a Local Network Virtual Appliance in Azure for Oracle Database@Azure

Oracle Database@Azure is an Oracle database service running on Oracle Cloud Infrastructure (OCI), collocated in Microsoft data centers. This ensures that the Oracle Database@Azure service has the fastest possible access to Azure resources and applications. The solution is intended to support the migration of Oracle database workloads to Azure, where customers can integrate and innovate with the breadth of Microsoft Cloud services.
For more information and to gain a better understanding of Oracle Database@Azure, please visit Overview - Oracle Database@Azure | Microsoft Learn.

The current Oracle Database@Azure service has a network limitation: it cannot respond to network connections outside of its Azure virtual network (VNet) when it is expected to route through a firewall. This limitation places constraints on extending integration to Azure services not located within the same VNet. This issue also impacts network communication from on-premises environments that need to connect to the Oracle Database@Azure service.

To address this network limitation, the recommended solution is to deploy a Network Virtual Appliance (NVA) within the Oracle Database@Azure VNet. While Microsoft and Oracle are working together on an update to the Azure platform that will eliminate this limitation, customers will need to follow this design pattern until the official rollout of the update.

Deploying an NVA

The NVA consists of a Linux virtual machine (VM), and any supported distribution on Azure can be used. The NVA referenced in this article is not a traditional firewall, but a VM acting as a router with IP forwarding enabled; it is not intended to be an enterprise-scale firewall NVA. This solution is only expected to help customers bridge the gap until the jointly engineered design pattern is available in all Azure regions.

The deployment of the NVA helps solve the specific scenarios outlined below:

- Where traffic inspection is required between Oracle Database@Azure and other resources
- Where native network support is not available
- With resources that have private endpoints
- Resources on another Azure virtual network (VNet)
- Services with delegated subnets
- Connectivity with on-premises

Additional details on supported network topologies can be found in the following article: Network planning for Oracle Database@Azure | Microsoft Learn.

Scope

This article covers a network scenario within an Azure Landing Zone that requires an NVA. The deployment steps for the NVA and the other ancillary steps required to complete the end-to-end implementation are included. This article does not cover hybrid connectivity from on-premises to Azure; that scenario will be covered in a later article. However, both share the same method of using User Defined Routes (UDRs).

Scenario Review

The Azure Landing Zone consists of a hub-and-spoke architecture where the application layer is hosted in a VNet dedicated to the application front-end services, such as web servers. Oracle Database@Azure is deployed in a separate VNet dedicated to data. The goal is to provide bidirectional network connectivity between the application layer and the data layer.

Deployment

The steps provided in this article should be followed in the designated order to ensure the expected results. Please consult with either your Microsoft or Oracle representative if you have specific questions related to your environment.
Environment Overview

- Hub VNet (10.0.0.0/16)
  - Hub NVA: 10.0.0.4
- Spoke 1 VNet - Application Tier (10.1.0.0/16)
  - Application Server: 10.1.0.4
- Spoke 2 VNet - Oracle Database (10.2.0.0/16)
  - Oracle DB Subnet: 10.2.0.0/24
  - Oracle Database: 10.2.0.4
  - Local NVA Subnet: 10.2.1.0/24
  - Local NVA: 10.2.1.4

Note: At the time this article was published, Azure Firewall is not supported in this scenario. Native support for third-party NVAs is scheduled for 2024, but subject to change. Until these features are fully implemented in Azure, third-party NVAs require this workaround to support network communication for the above-mentioned scenario.

Create a Linux VM in Azure as an NVA

Set up a Linux VM (using any supported distribution on Azure) in the desired resource group and the same region as the Oracle Database@Azure deployment, using your deployment method of choice (for example, the Azure portal, Azure PowerShell, or the Azure CLI). As a security recommendation, be sure to leverage Secure Shell (SSH) public/private keys to ensure secure communication. Ensure the VM is in the same VNet as, but on a separate subnet from, the Oracle Database@Azure delegated subnet, as well as from the dedicated Oracle backup subnet if one has been deployed.

Note: Sizing is very much driven by the actual traffic pattern. Consider how much traffic, in packets per second, is required to support the implementation. Starting with a 2-core general-purpose VM (D2s_v5 with 2 vCPUs) and 8 GiB (gibibytes) of memory, with accelerated networking enabled, can be used to gauge initial performance. High storage/IOPS performance SKUs are not necessary for this use case.

As part of the deployment and monitoring strategy, please consult Welcome | Azure Monitor Baseline Alerts for the proper Azure Monitor counters that should be enabled against the NVA to ensure performance and availability.

Enable IP forwarding on the VM's network interface card (NIC)

- Go to the Networking section of the NVA VM in the Azure portal.
- Select the network interface.
- Under Settings, choose IP configurations.
- Enable IP forwarding.

Enable IP forwarding at the operating system level

- SSH into the VM.
- Edit the sysctl configuration file to enable IP forwarding: sudo nano /etc/sysctl.conf
- Uncomment the following line: net.ipv4.ip_forward = 1
- Save and exit nano to apply the changes.
- Run sudo sysctl -p to reload the settings so the VM forwards network traffic without a reboot. The line net.ipv4.ip_forward = 1 will appear on the screen, indicating the change was applied successfully.

We now need to implement iptables rules to route traffic properly through the NVA. Linux systems lose their iptables rules after a reboot, so we will install a few packages and make some configuration changes to persist them. The first example uses an Ubuntu or Debian Linux distribution. Only IPv4 is used for the changes on the Linux systems listed in this article.

Ubuntu / Debian Linux system

Ensure that the local firewall on the NVA is enabled or set to not block traffic. First, enable iptables by running sudo systemctl enable iptables, then start it with sudo systemctl start iptables.
To list the current iptables rules, run sudo iptables -L. This will list any existing firewall rules.

Note: If there are rules, you can flush them with sudo iptables -F.

Next, install the iptables-persistent package:

- On an Ubuntu system, run sudo apt install iptables-persistent.
- On a Debian system, run sudo apt-get install iptables-persistent.

Make sure the service is enabled on Debian or Ubuntu using the systemctl command:

sudo systemctl is-enabled netfilter-persistent.service

If it is not enabled, run:

sudo systemctl enable netfilter-persistent.service

Get the status of the service by running:

sudo systemctl status netfilter-persistent.service

Enter the following commands line by line:

sudo iptables -t nat -A POSTROUTING -j MASQUERADE
sudo iptables -A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -j ACCEPT

Validate that the iptables rules are in place by running sudo iptables -L.

The applied iptables rules will be saved and reloaded if the system reboots.

The second example covers RedHat Enterprise Linux (RHEL), Fedora, and AlmaLinux. The commands are similar across these Linux distributions.

RHEL / Fedora / AlmaLinux

Run the following commands line by line to disable firewalld:

sudo systemctl stop firewalld.service
sudo systemctl disable firewalld.service
sudo systemctl mask firewalld.service

Next, install the iptables-services package using either the native yum or dnf package management commands.

The following example uses yum. Run each command line by line:

sudo yum install iptables-services
sudo systemctl enable iptables
sudo systemctl enable ip6tables
sudo systemctl status iptables

If you use dnf, run each command line by line:

sudo dnf install iptables-services
sudo systemctl enable iptables
sudo systemctl enable ip6tables
sudo systemctl status iptables

Once the service is installed, you can configure the /etc/sysconfig/iptables file for IPv4. Any rules added to this file are persistent. You can use your favorite editor (vi, vim, or nano) to edit the file. Add the following lines one by one and save the file once complete.

sudo iptables -t nat -A POSTROUTING -j MASQUERADE
sudo iptables -A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -j ACCEPT

Next, load the changes that were just made by running:

sudo systemctl restart iptables

Ensure that the Network Security Group (NSG) on the NVA allows all traffic from the application VNet and the Oracle Database@Azure delegated subnet.

Configure Route Tables

Oracle Database@Azure VNet (Spoke)

- Create a route table in the Azure portal in the same region and the proper resource group (RG) where the Oracle Database@Azure is located. Give it a meaningful name.
- Add routes to the route table:
  - Oracle Database Subnet: Associate the route table with this subnet.
  - From Oracle Database Subnet: Set the next hop for 0.0.0.0/0 to the local NVA VM (10.2.1.4).

Important: Ensure in the configuration of the route table that route propagation is disabled. This setup ensures that all traffic to and from the Oracle Database is forced through your local NVA.

Configure Route Tables for the NVA in the Oracle Database@Azure VNet

- Create another route table in the Azure portal in the same region and the proper resource group (RG) where the Oracle Database@Azure is located. Give it a meaningful name.
- Add routes to the route table:
  - NVA Subnet: Associate the route table with this subnet.
  - From NVA Subnet: Set the next hop for 0.0.0.0/0 to the hub NVA (10.0.0.4).

Important: Ensure in the configuration of the route table that route propagation is disabled. This setup ensures that all traffic to and from the Oracle Database is forced through your local NVA.

Route Configuration Application Tier

Route to Hub NVA

- Create another route table in the Azure portal in the same region and the proper resource group (RG) where the Oracle Database@Azure is located. Give it a meaningful name.
- Application Subnet: Attach the route table to the Application Subnet in the application VNet.
- Route from Application VNet: Destination 10.2.0.0/24 (Oracle Database Subnet), next hop 10.0.0.4 (Hub NVA).

Important: Ensure in the configuration of the route table that route propagation is disabled. This setup ensures that all traffic to and from the Oracle Database is forced through your local NVA.

Route Configuration Hub VNet

Route to Local NVA

- Create another route table in the Azure portal in the same region and the proper resource group (RG) where the Oracle Database@Azure is located. Give it a meaningful name.
- Firewall Subnet: Attach the route table to the Firewall Subnet in the Hub VNet.
- From Firewall Subnet: Set the next hop for 10.2.0.0/24 (Oracle Subnet) to 10.2.1.4 (Local NVA).
- If you have a Cisco, Palo Alto, or other third-party NVA, please ensure that there are no internal static routes that may conflict with the custom route table from Azure.

Important: Ensure in the configuration of the route table that route propagation is disabled. This setup ensures that all traffic to and from the Oracle Database is forced through your local NVA.

When finished, the implemented network flow and environment should match the following diagram:

Testing

The next step is to start testing by initiating a connection from the application servers. Make sure the proper components have been installed on the application servers to connect to Oracle Database@Azure before validating connectivity. Validate that the application servers can connect to the Oracle Database@Azure instance.

If you need to troubleshoot, deploy a test Linux VM on the application subnet to test connectivity. Install the mtr package on the Linux test VM. Do not rely on ping (ICMP) to troubleshoot, as this will not properly test connectivity within Azure. An example of the command using mtr would be the following: sudo mtr -T -n -P 1521 10.2.0.4. This example starts a trace that attempts to connect to the database without using ICMP; network port 1521, which the database listens on for connections, is selected. If a problem is identified, review that the route tables and IP addresses were entered correctly. If the initial tests are successful, you have implemented this solution correctly.
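The route tables described above can also be created with the Azure CLI instead of the portal. The following is a minimal sketch for the Oracle Database subnet route table only; the resource group, VNet, and subnet names are assumptions and should be replaced with your own values:

# Create a route table with BGP route propagation disabled (names are illustrative)
az network route-table create \
  --name rt-oracle-db-subnet \
  --resource-group rg-oracle \
  --location westeurope \
  --disable-bgp-route-propagation true

# Route all traffic leaving the Oracle Database subnet through the local NVA (10.2.1.4)
az network route-table route create \
  --route-table-name rt-oracle-db-subnet \
  --resource-group rg-oracle \
  --name default-via-local-nva \
  --address-prefix 0.0.0.0/0 \
  --next-hop-type VirtualAppliance \
  --next-hop-ip-address 10.2.1.4

# Associate the route table with the Oracle Database subnet (VNet and subnet names are illustrative)
az network vnet subnet update \
  --vnet-name vnet-oracle \
  --name snet-oracle-db \
  --resource-group rg-oracle \
  --route-table rt-oracle-db-subnet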
Next Steps

Please visit the Microsoft Cloud Adoption Framework (CAF): Introduction to Oracle on Azure adoption scenarios - Cloud Adoption Framework | Microsoft Learn

Authors: Moises Gomez Cortez; Technical Editor and Content Contributor: Anthony de Lagarde, Erik Munson
architecture","conversation":{"__ref":"Conversation:conversation:4068757"},"id":"message:4068757","revisionNum":3,"uid":4068757,"depth":0,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"author":{"__ref":"User:user:1103062"},"teaser@stripHtml({\"removeProcessingText\":true,\"truncateLength\":-1})":" \n   \n   ","introduction":"","metrics":{"__typename":"MessageMetrics","views":9723},"postTime":"2024-02-27T07:25:38.596-08:00","lastPublishTime":"2024-09-04T08:14:57.396-07:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" You have heard this before, Microsoft Fabric is an all-in-one analytics solution that covers everything from data integration to Machine Learning, real-time analytics and Business Intelligence. The big value proposition of Microsoft Fabric is a highly integrated, end-to-end, and easy-to-use service that simplifies your analytics.  \n What is not being highlighted enough, is how compelling this platform really is for ISVs and SaaS creators that want to offer analytics to their customers, either as their main product, or as an add-on to the main offering. This blog post will focus on this use-case specifically and will show what are the features in Fabric that simplify analytics for multitenant SaaS applications.   \n Proposed architecture SaaS Analytics on Microsoft Fabric  \n \n   \n   \n Important! The architecture proposed here is most optimal for an ISV with a B2B model. For ISVs with a B2C model this could be over-architecting. The decision should be taken based on tenant isolation and other technical and business requirements.  \n Let’s look at a proposed architecture for our use case. On our left side we have all our incoming data stream. This can be anything, customer owned, or ISV owned data sources, such as Databases, CRM systems, APIs, text files, etc. This data will land in your ingestion zone, or if you are following the Medallion architecture – your Bronze layer. You will transform your data with the Data engineering experience, essentially Spark, or Dataflow Gen 2, which is a visual way of transforming data and leverages Power Query. For those that used the Power BI dataflows previously, this experience will be extremely familiar.  \n Once your data is ready, you will land it into a Lakehouse. When you create a Lakehouse in Fabric, a Power BI semantic model and a SQL endpoint are automatically created with it. You can leverage the semantic model (previously called dataset) for your Power BI reporting needs. And the SQL endpoint can be used by other clients to query and retrieve the curated data with a familiar SQL syntax.  \n And the last step is to enable your end-customer, i.e. tenants, to consume these reports. For that, you will leverage Power BI Embedded and embed your reports into your web application. Your users will sign-in to your web application and get access to the reports their identity and tenant is authorized to see. We will discuss more the authentication and specifically service-to-service authentication of this architecture in the next sections.  \n Note! The term tenant here refers to the end-customer of an ISV and not the Microsoft Entra ID, previously Azure Active Directory, tenant.  
Microsoft Fabric features enabling SaaS applications

Next, we will look at ISV- and SaaS-specific requirements and how these are achieved in Microsoft Fabric:

- Tenant isolation
- Tenant monitoring and billing estimation
- Secure authentication of users and services

Tenant isolation

Workspace-based isolation is the easy and recommended way to segregate your tenants' data in Microsoft Fabric. Workspaces isolate the data at a logical level; all the Lakehouses live in the same OneLake. What this means is that each Microsoft Fabric tenant comes with a OneLake, which under the hood is an ADLS Gen2 account, where all the data will be stored. If your organization needs internal analytics in addition to customer analytics, you can create shortcuts from your internal-analytics workspace to the tenant workspaces and query that data without having to duplicate it.

Each workspace is assigned to a Fabric capacity, which is a distinct pool of resources allocated to a Fabric instance. These capacities are the compute that powers all the Fabric experiences. Capacities allow you, as an ISV, to share compute resources among your tenants. You can group multiple tenant workspaces to use the same capacity, and you can assign your big or premium tenants to bigger capacities or to individual capacities. The options here are multiple, and you are given the flexibility to create a setup that makes the most sense for your business and requirements.

Tenant monitoring and billing

The Fabric Capacity Metrics app allows you to monitor storage and capacity utilization. As a SaaS vendor, the ability to monitor utilization is key to understanding how your tenants are using your service, whether some are abusing the service and creating a noisy-neighbor problem, or whether others are underutilizing it, signaling potential attrition.

Besides leveraging this data to understand tenant utilization and behavior, you can also use it to estimate tenant consumption and determine whether you are billing your tenants proportionally to their usage of the service.

Secure authentication of users and services

The following diagram shows the authentication flow for our scenario:

- The user authenticates to the web application by using either:
  - An identity provider like AAD B2C, Okta, Auth0, etc.
  - Your own identity service. This is usually not recommended: building and maintaining an identity service is complex, and security requirements are changing and evolving rapidly. It is usually more optimal to delegate this task to a vendor with a track record of expertise in the domain. Once the authentication happens, your user will be logged in and able to use and see the content in your app that they're authorized to see.
- Your web app uses a user-assigned managed identity to authenticate against Microsoft Entra ID.
- Your web app gets a Microsoft Entra token from Microsoft Entra ID and uses it to access the Power BI REST APIs.
- Your web app calls an Embed Token REST API operation and requests the embed token. The embed token specifies which Power BI content can be embedded.
- The REST API returns the embed token to your web app.
- The web app passes the embed token to the user's web browser.
- The web app user uses the embed token to access Power BI.

Skip this paragraph if you are not curious about why user-assigned managed identity support is a great evolution.
Skip this paragraph if you are not curious about why user-assigned managed identity support is a great evolution. To appreciate it, you need to know that a while ago the only way to authenticate to the Power BI REST APIs was a service principal (or a master account, which is not practical for the embed-for-your-customers scenario). Creating a service principal per tenant is practically impossible for most ISVs. Imagine building a workspace-per-tenant model but having to use a single service principal to authenticate to every workspace; this is how it worked for a while. However, there is a limit to how many workspaces can be assigned to a service principal before you start seeing performance degradation: that limit is 1,000 workspaces per service principal. To overcome this limitation, the Power BI team created service principal profiles, an elegant solution to the challenge. Now that managed identities are supported, though, you don't have to worry about any of it. All operations on managed identities can be performed with ARM templates, the Azure CLI, PowerShell, REST APIs, and of course the Azure portal, which means you can automate their provisioning as part of your tenant onboarding automation.

Conclusion

In this blog post, we looked at how Microsoft Fabric can be your go-to solution for customer analytics in SaaS solutions, and which of its features make it uniquely compelling for ISVs. There is still a lot of ground to cover on development and deployment, and that is what we will talk about in the second part of this blog. Stay tuned.
Multi Hub and Spoke Topology using Azure Firewalls

Introduction and Purpose:

The fundamental model for network connectivity in Azure is Hub and Spoke. Compared to Azure Virtual WAN, the end user has more granular control of the routing and the ability to deploy shared resources on the Hub to be consumed by the Virtual Network (VNet) peers attached to it (the Spokes). However, the existing official documentation for attaining a Multi Hub and Spoke topology refers to models that include dynamic routing in the Hub using Azure Route Server and Network Virtual Appliances (NVAs), which requires a level of complexity for a network administrator, including the use of protocols like Border Gateway Protocol (BGP) and Virtual Extensible Local Area Network (VXLAN). The official documentation has left out a simpler way to attain this inter-hub connectivity with static routes.

This article describes a simple Inter Hub and Spoke topology and walks through its implementation.

Diagram:

[Diagram: Baseline Architecture for Multi Hub and Spoke]

The main purpose of the Azure Firewall, besides securing and inspecting traffic, is routing. The Azure Firewall is a managed resource whose routes are automatically created by the platform (system routes) to the local Spokes, the Hub, and the on-premises prefixes learned by its local Virtual Network Gateway. When you place an NVA on the Hub and query the effective routes, you will find a resultant route table that is similar to what you would find within the Azure Firewall.
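If you want to inspect those platform-programmed routes for yourself, the following sketch lists the effective routes on the NVA's (or any VM's) network interface with the azure-mgmt-network SDK. The subscription, resource group, and NIC names are placeholders, and the same information is available with az network nic show-effective-route-table, which is used later in this article.

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

# Placeholder values, for illustration only.
SUBSCRIPTION_ID = "<subscription-id>"
RESOURCE_GROUP = "hub-1-rg"
NIC_NAME = "nva-nic-0"  # the data NIC of the NVA placed in the Hub

client = NetworkManagementClient(DefaultAzureCredential(), SUBSCRIPTION_ID)

# Effective routes are computed on demand by the platform, so this is a
# long-running operation.
poller = client.network_interfaces.begin_get_effective_route_table(RESOURCE_GROUP, NIC_NAME)
for route in poller.result().value:
    print(route.source, route.state, route.address_prefix, route.next_hop_type, route.next_hop_ip_address)
```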
Since this is a static routing architecture, the shortest path to another Hub can be accomplished using global VNet peering between the Hubs. That way, the Hubs know about each other, and each local Firewall will contain the route table of each directly connected Hub. However, only the local Hubs know about their local Spokes. These Hubs can be in the same region or in different regions.

Routing on the Firewall subnets:

On the other hand, the local Firewall needs to know how to reach the remote Spokes, so we must place User Defined Routes (UDRs) in the Firewall subnet. To achieve this, the Firewall requires a default route of any type in order to be able to add more specific routes towards Spoke 4 and Spoke 5. A concrete example on the CLI looks like this:

```
maroja@Azure:~$ az network route-table route list -g $rg --route-table-name $rt -o table
AddressPrefix    HasBgpOverride    Name            NextHopIpAddress    NextHopType       ProvisioningState    ResourceGroup
---------------  ----------------  --------------  ------------------  ----------------  -------------------  ----------------
10.1.0.0/16      False             to-Spoke-1-FW1  10.0.1.4            VirtualAppliance  Succeeded            spoke-2-rg-exr-2
10.2.0.0/16      False             to-Spoke-2-FW1  10.0.1.4            VirtualAppliance  Succeeded            spoke-2-rg-exr-2
0.0.0.0/0        False             default                             Internet          Succeeded            spoke-2-rg-exr-2

maroja@Azure:~$ az network route-table route list -g $rg --route-table-name $rt -o table
AddressPrefix    HasBgpOverride    Name        NextHopIpAddress    NextHopType       ProvisioningState    ResourceGroup
---------------  ----------------  ----------  ------------------  ----------------  -------------------  ---------------
10.202.0.0/16    False             to-spoke-4  10.200.1.4          VirtualAppliance  Succeeded            Whathehack
10.203.0.0/16    False             to-spoke-5  10.200.1.4          VirtualAppliance  Succeeded            Whathehack
0.0.0.0/0        False             default                         Internet          Succeeded            Whathehack
```

Routing on the Spokes:

With traffic going from one Hub to another, we can reach the next hop that is directly connected via the global peering. As illustrated in the diagram, it is better to place a UDR with a 0.0.0.0/0 route whose next hop is the local Firewall in the subnets of the Spokes. This locks in a single exit point through the local firewall, and it also reduces the risk of asymmetric routing should the environment learn more specific prefixes from on-premises that might cause traffic to bypass the firewall. To understand this further, I encourage the reader to explore the following resource:

https://blog.cloudtrooper.net/2020/11/28/dont-let-your-azure-routes-bite-you/

```
maroja@Azure:~$ az network nic show-effective-route-table -g $rg -n $nic -o table
Source    State    Address Prefix    Next Hop Type     Next Hop IP
--------  -------  ----------------  ----------------  -------------
Default   Active   10.1.0.0/16       VnetLocal
Default   Active   10.0.0.0/16       VNetPeering
Default   Invalid  0.0.0.0/0         Internet
User      Active   0.0.0.0/0         VirtualAppliance  10.0.1.4
```

Conclusion:

The benefit of implementing this topology is that, with traffic going from one Hub to another, we can reach the next hop that is directly connected via the global peering. You can also achieve this type of connectivity by substituting the Azure Firewall with a Network Virtual Appliance of your choice.
Achieving that implementation would require additional configuration within the NVA, so it is left for future content. If you wish to explore that implementation, please comment.
Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit chat app in AKS using Terraform

In this sample, I demonstrate how to quickly build chat applications using Python, leveraging powerful technologies such as OpenAI ChatGPT models, embedding models, the LangChain framework, the ChromaDB vector database, and Chainlit, an open-source Python package specifically designed to create user interfaces (UIs) for AI applications. These applications are hosted in an Azure Kubernetes Service (AKS) cluster that communicates with Azure OpenAI Service (AOAI) via an Azure Private Endpoint. The sample provides two applications:
- Simple Chat: this simple chat application utilizes OpenAI's language models to generate real-time completion responses.
- Documents QA Chat: this chat application goes beyond simple conversations. Users can upload up to 10 .pdf and .docx documents, which are then processed to create vector embeddings. These embeddings are stored in ChromaDB for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response.

Both applications use a user-defined managed identity to authenticate and authorize against Azure OpenAI Service (AOAI) and Azure Container Registry (ACR), and use Azure Private Endpoints to connect privately and securely to these services. The chat UIs are built using Chainlit, an open-source Python package designed explicitly for creating AI applications. Chainlit seamlessly integrates with LangChain, LlamaIndex, and LangFlow, making it a powerful tool for developing ChatGPT-like applications with ease. You can find the companion code in this GitHub repository. For more information on Azure OpenAI Service and Large Language Models (LLMs), see the following articles:

- What is Azure OpenAI Service?
- Azure OpenAI Service models
- Large Language Model

NOTE: You can find the architecture.vsdx file used for the diagram under the visio folder.

Prerequisites

- An active Azure subscription. If you don't have one, create a free Azure account before you begin.
- Visual Studio Code installed on one of the supported platforms, along with the HashiCorp Terraform extension.
- Azure CLI version 2.49.0 or later installed. To install or upgrade, see Install Azure CLI.
- Version 0.5.140 or later of the aks-preview Azure CLI extension installed.
- Terraform v1.5.2 or later.
- The deployment must be started by a user who has sufficient permissions to assign roles, such as a User Access Administrator or Owner.
- Your Azure account also needs Microsoft.Resources/deployments/write permissions at the subscription level.

Architecture

The following diagram shows the architecture and network topology deployed by the sample. This project provides a set of Terraform modules to deploy the following resources:

- Azure Kubernetes Service: a public or private Azure Kubernetes Service (AKS) cluster composed of:
  - a system node pool in a dedicated subnet. The default node pool hosts only critical system pods and services. The worker nodes have a node taint which prevents application pods from being scheduled on this node pool.
  - a user node pool hosting user workloads and artifacts in a dedicated subnet.
- Azure OpenAI Service: an Azure OpenAI Service with a GPT-3.5 model used by the chatbot application. Azure OpenAI Service gives customers advanced language AI with OpenAI GPT-4, GPT-3, Codex, and DALL-E models with the security and enterprise promise of Azure. Azure OpenAI co-develops the APIs with OpenAI, ensuring compatibility and a smooth transition from one to the other.
- User-defined Managed Identity: a user-defined managed identity used by the AKS cluster to create additional resources like load balancers and managed disks in Azure.
- User-defined Managed Identity: a user-defined managed identity used by the chatbot application to acquire a security token via Microsoft Entra Workload ID to call the Chat Completion API of the ChatGPT model provided by the Azure OpenAI Service.
- Azure Virtual Machine: the Terraform modules can optionally create a jump-box virtual machine to manage the private AKS cluster.
- Azure Bastion Host: a separate Azure Bastion is deployed in the AKS cluster virtual network to provide SSH connectivity to both agent nodes and virtual machines.
- Azure NAT Gateway: a bring-your-own (BYO) Azure NAT Gateway to manage outbound connections initiated by AKS-hosted workloads. The NAT Gateway is associated with the SystemSubnet, UserSubnet, and PodSubnet subnets. The outboundType property of the cluster is set to userAssignedNatGateway to specify that a BYO NAT Gateway is used for outbound connections. NOTE: you can update the outboundType after cluster creation, and this will deploy or remove resources as required to put the cluster into the new egress configuration. For more information, see Updating outboundType after cluster creation.
- Azure Storage Account: this storage account is used to store the boot diagnostics logs of both the service provider and service consumer virtual machines. Boot Diagnostics is a debugging feature that allows you to view console output and screenshots to diagnose virtual machine status.
- Azure Container Registry: an Azure Container Registry (ACR) to build, store, and manage container images and artifacts in a private registry for all container deployments.
- Azure Key Vault: an Azure Key Vault used to store secrets, certificates, and keys that can be mounted as files by pods using the Azure Key Vault Provider for Secrets Store CSI Driver. For more information, see Use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster and Provide an identity to access the Azure Key Vault Provider for Secrets Store CSI Driver.
- Azure Private Endpoints: an Azure Private Endpoint is created for each of the following resources:
  - Azure OpenAI Service
  - Azure Container Registry
  - Azure Key Vault
  - Azure Storage Account
  - API Server, when deploying a private AKS cluster.
- Azure Private DNS Zones: an Azure Private DNS Zone is created for each of the following resources:
  - Azure OpenAI Service
  - Azure Container Registry
  - Azure Key Vault
  - Azure Storage Account
  - API Server, when deploying a private AKS cluster.
- Azure Network Security Groups: subnets hosting virtual machines and Azure Bastion Hosts are protected by Azure Network Security Groups that are used to filter inbound and outbound traffic.
- Azure Log Analytics Workspace: a centralized Azure Log Analytics workspace is used to collect the diagnostics logs and metrics from all the Azure resources:
  - Azure OpenAI Service
  - Azure Kubernetes Service cluster
  - Azure Key Vault
  - Azure Network Security Group
  - Azure Container Registry
  - Azure Storage Account
  - Azure jump-box virtual machine
- Azure Monitor workspace: an Azure Monitor workspace is a unique environment for data collected by Azure Monitor. Each workspace has its own data repository, configuration, and permissions. Log Analytics workspaces contain logs and metrics data from multiple Azure resources, whereas Azure Monitor workspaces currently contain only metrics related to Prometheus.
  Azure Monitor managed service for Prometheus allows you to collect and analyze metrics at scale using a Prometheus-compatible monitoring solution based on the Prometheus project. This fully managed service allows you to use the Prometheus query language (PromQL) to analyze and alert on the performance of monitored infrastructure and workloads without having to operate the underlying infrastructure. The primary method for visualizing Prometheus metrics is Azure Managed Grafana. You can connect your Azure Monitor workspace to an Azure Managed Grafana instance to visualize Prometheus metrics using a set of built-in and custom Grafana dashboards.
- Azure Managed Grafana: an Azure Managed Grafana instance used to visualize the Prometheus metrics generated by the Azure Kubernetes Service (AKS) cluster deployed by the Terraform modules. Azure Managed Grafana is a fully managed service for analytics and monitoring solutions. It is supported by Grafana Enterprise, which provides extensible data visualizations. This managed service allows you to quickly and easily deploy Grafana dashboards with built-in high availability and to control access with Azure security.
- NGINX Ingress Controller: this sample compares the managed and unmanaged NGINX Ingress Controller. While the managed version is installed using the application routing add-on, the unmanaged version is deployed using the Helm Terraform Provider. You can use the Helm provider to deploy software packages in Kubernetes. The provider needs to be configured with the proper credentials before it can be used.
- Cert-Manager: the cert-manager package and the Let's Encrypt certificate authority are used to issue a TLS/SSL certificate to the chat applications.
- Prometheus: the AKS cluster is configured to collect metrics to the Azure Monitor workspace and Azure Managed Grafana. Nonetheless, the kube-prometheus-stack Helm chart is used to install Prometheus and Grafana on the AKS cluster.
- Workload namespace and service account: the Kubectl Terraform Provider and Kubernetes Terraform Provider are used to create the namespace and service account used by the chat applications.
- Azure Monitor ConfigMaps for Azure Monitor managed service for Prometheus and the cert-manager Cluster Issuer are deployed using the Kubectl Terraform Provider and Kubernetes Terraform Provider.

The architecture of the chat application can be seen in the image below. The same architecture is also adopted by the docs application.

The technologies and frameworks utilized by both applications are depicted in the next figure. Both applications are written in Python.

NOTE: In a production environment, we strongly recommend deploying a private AKS cluster with Uptime SLA. For more information, see private AKS cluster with a Public DNS address. Alternatively, you can deploy a public AKS cluster and secure access to the API server using authorized IP address ranges.

What is Azure OpenAI Service?

The Azure OpenAI Service is a platform offered by Microsoft Azure that provides cognitive services powered by OpenAI models. One of the models available through this service is the ChatGPT model, which is designed for interactive conversational tasks. It allows developers to integrate natural language understanding and generation capabilities into their applications.

Azure OpenAI Service provides REST API access to OpenAI's powerful language models, including the GPT-3, Codex, and Embeddings model series.
In addition, the new GPT-4 and ChatGPT model series have now reached general availability. These models can be easily adapted to your specific task, including but not limited to content generation, summarization, semantic search, and natural language to code translation. Users can access the service through REST APIs, the Python SDK, or the web-based interface in the Azure OpenAI Studio.

The Chat Completion API, which is part of the Azure OpenAI Service, provides a dedicated interface for interacting with the ChatGPT and GPT-4 models. This API is currently in preview and is the preferred method for accessing these models. The GPT-4 models can only be accessed through this API.

GPT-3, GPT-3.5, and GPT-4 models from OpenAI are prompt-based. With prompt-based models, the user interacts with the model by entering a text prompt, to which the model responds with a text completion. This completion is the model's continuation of the input text. While these models are extremely powerful, their behavior is also very sensitive to the prompt. This makes prompt construction an important skill to develop. For more information, see Introduction to prompt engineering.

Prompt construction can be difficult. In practice, the prompt acts to configure the model weights to complete the desired task, but it's more of an art than a science, often requiring experience and intuition to craft a successful prompt. The goal of this article is to help get you started with this learning process. It attempts to capture general concepts and patterns that apply to all GPT models. However, it's important to understand that each model behaves differently, so the learnings may not apply equally to all models.

Prompt engineering refers to the process of creating instructions called prompts for Large Language Models (LLMs), such as OpenAI's ChatGPT. With the immense potential of LLMs to solve a wide range of tasks, leveraging prompt engineering can empower us to save significant time and facilitate the development of impressive applications. It holds the key to unleashing the full capabilities of these huge models, transforming how we interact with and benefit from them. For more information, see Prompt engineering techniques.
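To show what keyless, Microsoft Entra ID-based access to the Chat Completion API looks like from Python, here is a minimal sketch using the openai and azure-identity packages. The endpoint, deployment name, and API version are placeholders; the actual applications in this sample wire this up through LangChain rather than calling the client directly.

```python
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI

# Placeholders: use your Azure OpenAI endpoint and the chat model deployment
# created by the Terraform modules (e.g. gpt-35-turbo-16k).
AOAI_ENDPOINT = "https://<your-openai-resource>.openai.azure.com"
DEPLOYMENT = "gpt-35-turbo-16k"

# Exchange the workload identity / managed identity for Entra ID tokens
# instead of using an API key.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)

client = AzureOpenAI(
    azure_endpoint=AOAI_ENDPOINT,
    azure_ad_token_provider=token_provider,
    api_version="2024-02-01",
)

# A single-turn chat completion: a system prompt plus one user message.
completion = client.chat.completions.create(
    model=DEPLOYMENT,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize Azure OpenAI Service in one sentence."},
    ],
)
print(completion.choices[0].message.content)
```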
Vector Databases

A vector database is a specialized database that goes beyond traditional storage by organizing information to simplify the search for similar items. Instead of merely storing words or numbers, it leverages vector embeddings, unique numerical representations of data. These embeddings capture meaning, context, and relationships. For instance, words are represented as vectors, and similar words have similar vector values.

The applications of vector databases are numerous and powerful. In language processing, they facilitate the discovery of related documents or sentences. By comparing the vector embeddings of different texts, finding similar or related information becomes faster and more efficient. This capability benefits search engines and recommendation systems, which can suggest relevant articles or products based on user interests.

In the realm of image analysis, vector databases excel at finding visually similar images. By representing images as vectors, a simple comparison of vector values can identify visually similar images. This capability is highly valuable for tasks like reverse image search or content-based image retrieval.

Additionally, vector databases find applications in fraud detection, anomaly detection, and clustering. By comparing vector embeddings of data points, unusual patterns can be detected, and similar items can be grouped together, aiding in effective data analysis and decision-making. This is a list of Azure services that are suitable for use as a vector database in a retrieval-augmented generation (RAG) solution:

- Azure Cosmos DB for MongoDB vCore: vCore-based Azure Cosmos DB for MongoDB provides developers with a fully managed MongoDB-compatible database service for building modern applications with a familiar architecture. Developers can enjoy the benefits of native Azure integrations, low total cost of ownership (TCO), and the familiar vCore architecture when migrating existing applications or building new ones. Azure Cosmos DB for MongoDB features built-in vector database capabilities, enabling your data and vectors to be stored together for efficient and accurate vector searches.
- Azure Cosmos DB for NoSQL: Azure Cosmos DB for NoSQL is a globally distributed database service designed for scalable and high-performance applications. It offers an industry-leading 99.999% Service Level Agreement (SLA), ensuring high availability for your mission-critical applications. With sub-10 ms point reads and instant autoscale, it provides lightning-fast data access and seamless scalability. Its flexible, schemaless data model allows for agile and adaptable application development. Moreover, Azure Cosmos DB's built-in vector index using DiskANN enables fast, accurate, and cost-effective vector search at any scale, enhancing the efficiency and effectiveness of your data-driven applications.
- Azure Cosmos DB for PostgreSQL: you can use the natively integrated vector database in Azure Cosmos DB for PostgreSQL, which offers an efficient way to store, index, and search high-dimensional vector data directly alongside other application data. This approach removes the necessity of migrating your data to costlier alternative vector databases and provides a seamless integration of your AI-driven applications.
- Azure Cache for Redis: Azure Cache for Redis can be used as a vector database by combining it with models like Azure OpenAI for retrieval-augmented generative AI and analysis scenarios.

Here is a list of the most popular vector databases:

- ChromaDB is a powerful database solution that stores and retrieves vector embeddings efficiently. It is commonly used in AI applications, including chatbots and document analysis systems. By storing embeddings in ChromaDB, users can easily search and retrieve similar vectors, enabling faster and more accurate matching or recommendation processes. ChromaDB offers excellent scalability and high performance, and it supports various indexing techniques to optimize search operations. It is a versatile tool that enhances the functionality and efficiency of AI applications that rely on vector embeddings.
- Facebook AI Similarity Search (FAISS) is another widely used vector database. Developed by Facebook AI Research, it offers highly optimized algorithms for similarity search and clustering of vector embeddings. FAISS is known for its speed and scalability, making it suitable for large-scale applications. It offers different indexing methods like flat, IVF (Inverted File System), and HNSW (Hierarchical Navigable Small World) to organize and search vector data efficiently.
- SingleStore: SingleStore aims to deliver the world's fastest distributed SQL database for data-intensive applications, SingleStoreDB, which combines transactional and analytical workloads in a single platform.
- Astra DB: DataStax Astra DB is a cloud-native, multi-cloud, fully managed database-as-a-service based on Apache Cassandra, which aims to accelerate application development and reduce deployment time for applications from weeks to minutes.
- Milvus: Milvus is an open-source vector database built to power embedding similarity search and AI applications. Milvus makes unstructured data search more accessible and provides a consistent user experience regardless of the deployment environment. Milvus 2.0 is a cloud-native vector database with storage and computation separated by design. All components in this refactored version of Milvus are stateless to enhance elasticity and flexibility.
- Qdrant: Qdrant is a vector similarity search engine and database for AI applications. Along with being open source, Qdrant is also available in the cloud. It provides a production-ready service with an API to store, search, and manage points, i.e. vectors with an additional payload. Qdrant is tailored to extended filtering support, which makes it useful for all sorts of neural-network or semantic-based matching, faceted search, and other applications.
- Pinecone: Pinecone is a fully managed vector database that makes adding vector search to production applications accessible. It combines state-of-the-art vector search libraries, advanced features such as filtering, and distributed infrastructure to provide high performance and reliability at any scale.
- Vespa: Vespa is a platform for applications that combine data and AI, online. Building such applications on Vespa helps users avoid integration work to get features, and it can scale to support any amount of traffic and data. To deliver that, Vespa provides a broad range of query capabilities, a computation engine with support for modern machine-learned models, hands-off operability, data management, and application development support. It is free and open source to use under the Apache 2.0 license.
- Zilliz: Milvus is an open-source vector database, with over 18,409 stars on GitHub and 3.4 million+ downloads. Milvus supports billion-scale vector search and has over 1,000 enterprise users. Zilliz Cloud provides a fully managed Milvus service made by the creators of Milvus. As a DBaaS, Zilliz simplifies the process of deploying and scaling vector search applications by eliminating the need to create and maintain complex data infrastructure.
- Weaviate: Weaviate is an open-source vector database used to store data objects and vector embeddings from ML models, and to scale into billions of data objects, from the company of the same name in Amsterdam. Users can index billions of data objects to search through and combine multiple search techniques, such as keyword-based and vector search, to provide rich search experiences.

This sample makes use of the ChromaDB vector database, but you can easily modify the code to use another vector database. You can even use Azure Cache for Redis Enterprise to store the vector embeddings and compute vector similarity with high performance and low latency. For more information, see Vector Similarity Search with Azure Cache for Redis Enterprise.
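To give a feel for how the Documents QA flow could use ChromaDB, here is a minimal, self-contained sketch based on the chromadb Python package and its default embedding function. The collection name and documents are made up, and the real application chunks the uploaded files first and creates the embeddings with the Azure OpenAI embeddings model.

```python
import chromadb

# In-memory client; a persistent store could be created with
# chromadb.PersistentClient(path="./chroma") instead.
client = chromadb.Client()

# One collection for the uploaded documents (the name is a placeholder).
collection = client.create_collection(name="uploaded-docs")

# Add a few text chunks; Chroma computes embeddings with its default embedding
# function when no embeddings are supplied explicitly.
collection.add(
    ids=["chunk-1", "chunk-2"],
    documents=[
        "Azure Kubernetes Service is a managed Kubernetes offering.",
        "Azure OpenAI Service exposes GPT models behind an Azure endpoint.",
    ],
    metadatas=[{"source": "doc1.pdf"}, {"source": "doc2.docx"}],
)

# Retrieve the chunk most similar to a user question, together with its source.
results = collection.query(query_texts=["What is AKS?"], n_results=1)
print(results["documents"][0], results["metadatas"][0])
```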
LangChain

LangChain is a software framework designed to streamline the development of applications using large language models (LLMs). It serves as a language model integration framework, facilitating various applications like document analysis and summarization, chatbots, and code analysis.

LangChain's integrations cover an extensive range of systems, tools, and services, making it a comprehensive solution for language model-based applications. LangChain integrates with the major cloud platforms, such as Microsoft Azure, Amazon AWS, and Google, and with API wrappers for various purposes like news, movie information, and weather, as well as support for Bash, web scraping, and more. It also supports multiple language models, including those from OpenAI, Anthropic, and Hugging Face. Moreover, LangChain offers various functionalities for document handling, code generation, analysis, debugging, and interaction with databases and other data sources.

Chainlit

Chainlit is an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. It simplifies the process of building interactive chats and interfaces, making the development of AI-powered applications faster and more efficient. While Streamlit is a general-purpose UI library, Chainlit is purpose-built for AI applications and seamlessly integrates with other AI technologies such as LangChain, LlamaIndex, and LangFlow.

With Chainlit, developers can easily create intuitive UIs for their AI models, including ChatGPT-like applications. It provides a user-friendly interface for users to interact with AI models, enabling conversational experiences and information retrieval. Chainlit also offers unique features, such as the ability to display the Chain of Thought, which allows users to explore the reasoning process directly within the UI. This feature enhances transparency and enables users to understand how the AI arrives at its responses or recommendations. A minimal Chainlit handler is sketched a little further below. For more information, see the following resources:

- Documentation
- Examples
- API Reference
- Cookbook

Managed NGINX Ingress Controller for Azure Kubernetes Service

One way to route HTTP and secure HTTPS traffic to applications in an Azure Kubernetes Service (AKS) cluster is by using the Kubernetes Ingress object. The application routing add-on for AKS enables you to create, configure, and manage one or more Ingress controllers within your AKS cluster using the NGINX Ingress Controller.

The application routing add-on with NGINX provides several features, including:

- Easy configuration of managed NGINX Ingress controllers based on the Kubernetes NGINX Ingress controller.
- Integration with Azure DNS for public and private zone management.
- SSL termination with certificates stored in Azure Key Vault.

To enable the application routing add-on on an existing cluster, you can use the Azure CLI, as shown in the following code snippet.

```
az aks approuting enable -g <ResourceGroupName> -n <ClusterName>
```

Once enabled, you can connect to your AKS cluster, deploy applications, and create Ingress objects with appropriate annotations for routing. There are some limitations to be aware of, such as the maximum number of supported Azure DNS zones and namespace editing restrictions. It's recommended to review the application routing add-on configuration for additional information on SSL encryption and DNS integration.
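As promised above, here is a minimal, hypothetical Chainlit handler to make the framework description concrete before moving on to routing; the Simple Chat application in the sample wires handlers like these to LangChain and Azure OpenAI instead of echoing the input.

```python
import chainlit as cl


@cl.on_chat_start
async def on_chat_start():
    # Runs once per user session; a real app would build its LangChain chain here.
    await cl.Message(content="Hi! Ask me anything.").send()


@cl.on_message
async def on_message(message: cl.Message):
    # A real app would call the LLM; this sketch simply echoes the user input.
    await cl.Message(content=f"You said: {message.content}").send()
```

Running chainlit run app.py -w serves the UI locally on port 8000, which matches the service port targeted by the ingress example below.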
If you are familiar with the NGINX ingress controller, you can just replace the nginx ingress class name inside an ingress object with the name of the ingress controller deployed by the application routing add-on, which by default is webapprouting.kubernetes.azure.com:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: chat-ingress-webapprouting
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-webapprouting
    cert-manager.io/acme-challenge-type: http01
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: "3600"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/cors-allow-origin: "*"
    nginx.ingress.kubernetes.io/cors-allow-credentials: "false"
spec:
  ingressClassName: webapprouting.kubernetes.azure.com
  tls:
  - hosts:
    - chat.babosbird.com
    secretName: chat-tls-secret-webapprouting
  rules:
  - host: chat.babosbird.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: chat
            port:
              number: 8000
```

If you leverage cert-manager with the Let's Encrypt certificate authority to issue TLS/SSL certificates to your application, make sure to create an issuer or a cluster issuer for the ingress class of the managed NGINX ingress controller installed by the application routing add-on. This can be done using the sample code provided below:

```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-webapprouting
spec:
  acme:
    email: admin@contoso.com
    privateKeySecretRef:
      name: letsencrypt-webapprouting
    server: https://acme-v02.api.letsencrypt.org/directory
    solvers:
    - http01:
        ingress:
          class: webapprouting.kubernetes.azure.com
          podTemplate:
            metadata: {}
            spec:
              nodeSelector:
                kubernetes.io/os: linux
```

Ensure that you replace admin@contoso.com with your own email address to receive notifications from Let's Encrypt. By using this configuration, cert-manager will be able to issue certificates for the ingress class of the managed NGINX ingress controller when using the application routing add-on. Please note that the server URL https://acme-v02.api.letsencrypt.org/directory is the Let's Encrypt production server. You can also use the staging server https://acme-staging-v02.api.letsencrypt.org/directory for testing purposes to avoid rate limits. Ensure that the issuer or cluster issuer resource is deployed to your Kubernetes cluster, and that cert-manager is properly installed and configured. For more detailed steps and instructions, refer to Managed NGINX Ingress with the application routing add-on.

Deploy the Terraform modules

Before deploying the Terraform modules in the terraform folder, specify a value for the following variables in the terraform.tfvars variable definitions file.
```hcl
name_prefix                  = "Contoso"
domain                       = "contoso.com"
kubernetes_version           = "1.28.3"
namespace                    = "chainlit"
service_account_name         = "chainlit-sa"
ssh_public_key               = "XXXXXXX"
vm_enabled                   = true
location                     = "eastus"
admin_group_object_ids       = ["XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"]
web_app_routing_enabled      = true
dns_zone_name                = "babosbird.com"
dns_zone_resource_group_name = "DnsResourceGroup"

grafana_admin_user_object_id = "0c5267b2-01f3-4a59-970e-0d9218d5412e"
vnet_integration_enabled     = true
openai_deployments = [
  {
    name = "gpt-35-turbo-16k"
    model = {
      name    = "gpt-35-turbo-16k"
      version = "0613"
    }
  },
  {
    name = "text-embedding-ada-002"
    model = {
      name    = "text-embedding-ada-002"
      version = "2"
    }
  }
]
```

Description:

- name_prefix: specifies a prefix for all the Azure resources.
- domain: specifies the domain part (e.g., subdomain.domain) of the hostname of the ingress object used to expose the chatbot via the NGINX Ingress Controller.
- kubernetes_version: specifies the Kubernetes version installed on the AKS cluster.
- namespace: specifies the namespace of the workload application that accesses the Azure OpenAI Service.
- service_account_name: specifies the name of the service account of the workload application that accesses the Azure OpenAI Service.
- ssh_public_key: specifies the SSH public key used for the AKS nodes and jumpbox virtual machine.
- vm_enabled: a boolean value that specifies whether to deploy a jumpbox virtual machine in the same virtual network as the AKS cluster.
- location: specifies the region (e.g., westeurope) where the Azure resources are deployed.
- admin_group_object_ids: when deploying an AKS cluster with Microsoft Entra ID and Azure RBAC integration, this array parameter contains the list of Microsoft Entra ID group object IDs that will have the admin role on the cluster.
- web_app_routing_enabled: specifies whether the application routing add-on is enabled. When enabled, this add-on installs a managed instance of the NGINX Ingress Controller on the AKS cluster.
- dns_zone_name: specifies the name of the Azure Public DNS zone used by the application routing add-on.
- dns_zone_resource_group_name: specifies the resource group name of the Azure Public DNS zone used by the application routing add-on.
- grafana_admin_user_object_id: specifies the object ID of the Azure Managed Grafana administrator user account.
- vnet_integration_enabled: specifies whether API Server VNet Integration is enabled.
- openai_deployments: specifies the list of the Azure OpenAI Service models to create. This sample needs a GPT model for chat completions and an embeddings model.

NOTE: We suggest reading sensitive configuration data such as passwords or SSH keys from a pre-existing Azure Key Vault resource. For more information, see Referencing Azure Key Vault secrets in Terraform. Before proceeding, also make sure to run the register-preview-features.sh Bash script in the terraform folder to register any preview feature used by the AKS cluster.

OpenAI Terraform Module

The following snippet contains the code from the openai.tf Terraform module used to deploy the Azure OpenAI Service.
```hcl
resource "azurerm_cognitive_account" "openai" {
  name                          = var.name
  location                      = var.location
  resource_group_name           = var.resource_group_name
  kind                          = "OpenAI"
  custom_subdomain_name         = var.custom_subdomain_name
  sku_name                      = var.sku_name
  public_network_access_enabled = var.public_network_access_enabled
  tags                          = var.tags

  identity {
    type = "SystemAssigned"
  }

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

# One model deployment is created for each entry in the deployments variable.
resource "azurerm_cognitive_deployment" "deployment" {
  for_each = { for deployment in var.deployments : deployment.name => deployment }

  name                 = each.key
  cognitive_account_id = azurerm_cognitive_account.openai.id

  model {
    format  = "OpenAI"
    name    = each.value.model.name
    version = each.value.model.version
  }

  scale {
    type = "Standard"
  }
}

resource "azurerm_monitor_diagnostic_setting" "settings" {
  name                       = "DiagnosticsSettings"
  target_resource_id         = azurerm_cognitive_account.openai.id
  log_analytics_workspace_id = var.log_analytics_workspace_id

  enabled_log {
    category = "Audit"
  }

  enabled_log {
    category = "RequestResponse"
  }

  enabled_log {
    category = "Trace"
  }

  metric {
    category = "AllMetrics"
  }
}
```

Azure Cognitive Services use custom subdomain names for each resource created through the Azure portal, Azure Cloud Shell, Azure CLI, Bicep, Azure Resource Manager (ARM), or Terraform. Unlike regional endpoints, which were common to all customers in a specific Azure region, custom subdomain names are unique to the resource. Custom subdomain names are required to enable features like Microsoft Entra ID for authentication. In our case, we need to specify a custom subdomain for our Azure OpenAI Service because our chatbot application uses a Microsoft Entra ID security token to access it. By default, the main.tf module sets the value of the custom_subdomain_name parameter to the lowercase name of the Azure OpenAI resource. For more information on custom subdomains, see Custom subdomain names for Cognitive Services.

This Terraform module allows you to pass an array containing the definition of one or more model deployments in the deployments parameter. For more information on model deployments, see Create a resource and deploy a model using Azure OpenAI. As an alternative, you can use the Terraform module for deploying Azure OpenAI Service.

AKS Terraform Module

The following snippet shows the code of the Terraform module used to deploy the Azure Kubernetes Service (AKS) cluster with a user-assigned managed identity. For more information on the azurerm_kubernetes_cluster resource, see the Terraform documentation.
\n \n resource \"azurerm_user_assigned_identity\" \"aks_identity\" {\n resource_group_name = var.resource_group_name\n location = var.location\n tags = var.tags\n\n name = \"${var.name}Identity\"\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_kubernetes_cluster\" \"aks_cluster\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n kubernetes_version = var.kubernetes_version\n dns_prefix = var.dns_prefix\n private_cluster_enabled = var.private_cluster_enabled\n automatic_channel_upgrade = var.automatic_channel_upgrade\n sku_tier = var.sku_tier\n workload_identity_enabled = var.workload_identity_enabled\n oidc_issuer_enabled = var.oidc_issuer_enabled\n open_service_mesh_enabled = var.open_service_mesh_enabled\n image_cleaner_enabled = var.image_cleaner_enabled\n image_cleaner_interval_hours = var.image_cleaner_interval_hours\n azure_policy_enabled = var.azure_policy_enabled\n http_application_routing_enabled = var.http_application_routing_enabled\n\n default_node_pool {\n name = var.system_node_pool_name\n vm_size = var.system_node_pool_vm_size\n vnet_subnet_id = var.vnet_subnet_id\n pod_subnet_id = var.pod_subnet_id\n zones = var.system_node_pool_availability_zones\n node_labels = var.system_node_pool_node_labels\n node_taints = var.system_node_pool_node_taints\n enable_auto_scaling = var.system_node_pool_enable_auto_scaling\n enable_host_encryption = var.system_node_pool_enable_host_encryption\n enable_node_public_ip = var.system_node_pool_enable_node_public_ip\n max_pods = var.system_node_pool_max_pods\n max_count = var.system_node_pool_max_count\n min_count = var.system_node_pool_min_count\n node_count = var.system_node_pool_node_count\n os_disk_type = var.system_node_pool_os_disk_type\n tags = var.tags\n }\n\n linux_profile {\n admin_username = var.admin_username\n ssh_key {\n key_data = var.ssh_public_key\n }\n }\n\n identity {\n type = \"UserAssigned\"\n identity_ids = tolist([azurerm_user_assigned_identity.aks_identity.id])\n }\n\n network_profile {\n dns_service_ip = var.network_dns_service_ip\n network_plugin = var.network_plugin\n outbound_type = var.outbound_type\n service_cidr = var.network_service_cidr\n }\n\n oms_agent {\n msi_auth_for_monitoring_enabled = true\n log_analytics_workspace_id = coalesce(var.oms_agent.log_analytics_workspace_id, var.log_analytics_workspace_id)\n }\n\n dynamic \"web_app_routing\" {\n for_each = var.web_app_routing.enabled ? [1] : []\n\n content {\n dns_zone_id = var.web_app_routing.dns_zone_id\n }\n }\n\n dynamic \"ingress_application_gateway\" {\n for_each = try(var.ingress_application_gateway.gateway_id, null) == null ? [] : [1]\n\n content {\n gateway_id = var.ingress_application_gateway.gateway_id\n subnet_cidr = var.ingress_application_gateway.subnet_cidr\n subnet_id = var.ingress_application_gateway.subnet_id\n }\n }\n\n api_server_access_profile {\n authorized_ip_ranges = var.authorized_ip_ranges == null ? 
[] : var.authorized_ip_ranges\n subnet_id = var.api_server_subnet_id\n vnet_integration_enabled = var.vnet_integration_enabled\n }\n\n azure_active_directory_role_based_access_control {\n managed = true\n tenant_id = var.tenant_id\n admin_group_object_ids = var.admin_group_object_ids\n azure_rbac_enabled = var.azure_rbac_enabled\n }\n\n key_vault_secrets_provider {\n secret_rotation_enabled = true\n secret_rotation_interval = \"2m\"\n }\n\n workload_autoscaler_profile {\n keda_enabled = var.keda_enabled\n vertical_pod_autoscaler_enabled = var.vertical_pod_autoscaler_enabled\n }\n \n monitor_metrics {\n annotations_allowed = var.annotations_allowed\n labels_allowed = var.labels_allowed\n }\n\n lifecycle {\n ignore_changes = [\n kubernetes_version,\n tags\n ]\n }\n}\n\nresource \"azurerm_monitor_diagnostic_setting\" \"settings\" {\n name = \"DiagnosticsSettings\"\n target_resource_id = azurerm_kubernetes_cluster.aks_cluster.id\n log_analytics_workspace_id = var.log_analytics_workspace_id\n\n enabled_log {\n category = \"kube-apiserver\"\n }\n\n enabled_log {\n category = \"kube-audit\"\n }\n\n enabled_log {\n category = \"kube-audit-admin\"\n }\n\n enabled_log {\n category = \"kube-controller-manager\"\n }\n\n enabled_log {\n category = \"kube-scheduler\"\n }\n\n enabled_log {\n category = \"cluster-autoscaler\"\n }\n\n enabled_log {\n category = \"guard\"\n }\n\n metric {\n category = \"AllMetrics\"\n }\n}\n\nresource \"local_file\" \"kubeconfig\" {\n filename = \"${path.module}/kubeconfig\"\n content = azurerm_kubernetes_cluster.aks_cluster.kube_admin_config_raw\n depends_on = [azurerm_kubernetes_cluster.aks_cluster]\n} \n \n This module allows you to deploy an Azure Kubernetes Service cluster with the following extensions and features: \n   \n \n API Server VNET Integration allows you to enable network communication between the API server and the cluster nodes without requiring a private link or tunnel. AKS clusters with API Server VNET integration provide a series of advantages, for example, they can have public network access or private cluster mode enabled or disabled without redeploying the cluster. For more information, see Create an Azure Kubernetes Service cluster with API Server VNet Integration. \n Azure NAT Gateway to manage outbound connections initiated by AKS-hosted workloads. \n Event-driven Autoscaling (KEDA) add-on is a single-purpose and lightweight component that strives to make application autoscaling simple and is a CNCF Incubation project. \n Vertical Pod Autoscaling automatically sets resource requests and limits on containers per workload based on past usage. VPA makes sure that pods are scheduled onto nodes that have the required CPU and memory resources. For more information, see Kubernetes Vertical Pod Autoscaling. \n Azure Key Vault Provider for Secrets Store CSI Driver provides a variety of methods of identity-based access to your Azure Key Vault. \n Image Cleaner to clean up stale images on your Azure Kubernetes Service cluster. \n Application routing add-on: this add-on allows you to deploy one or more instances of a managed NGINX ingress controller. The managed NGINX ingress controller deployed via the application routing add-on exposes many metrics for requests, the nginx process, and the controller that can be helpful in analyzing the performance and usage of your application. For more information, see Monitor the ingress-nginx controller metrics in the application routing add-on with Prometheus in Grafana.
\n \n   \n \n NOTE You can deploy an AKS resource as a public cluster with API Server VNet Integration enabled. During the installation, you can use Terraform modules that make use of the Helm, Kubectl, and Kubernetes Terraform Providers to install packages and create Kubernetes entities. Once the installation is complete, you can convert the cluster to private. \n \n   \n Private Endpoint and Private DNS Zone Terraform Modules \n The  main.tf  module creates Azure Private Endpoints and Azure Private DNS Zones for each of the following resources: \n   \n \n Azure OpenAI Service \n Azure Container Registry \n Azure Key Vault \n Azure Storage Account \n \n   \n In particular, it creates an Azure Private Endpoint and Azure Private DNS Zone for the Azure OpenAI Service as shown in the following code snippet: \n \n module \"openai_private_dns_zone\" {\n source = \"./modules/private_dns_zone\"\n name = \"privatelink.openai.azure.com\"\n resource_group_name = azurerm_resource_group.rg.name\n tags = var.tags\n virtual_networks_to_link = {\n (module.virtual_network.name) = {\n subscription_id = data.azurerm_client_config.current.subscription_id\n resource_group_name = azurerm_resource_group.rg.name\n }\n }\n}\n\nmodule \"openai_private_endpoint\" {\n source = \"./modules/private_endpoint\"\n name = \"${module.openai.name}PrivateEndpoint\"\n location = var.location\n resource_group_name = azurerm_resource_group.rg.name\n subnet_id = module.virtual_network.subnet_ids[var.vm_subnet_name]\n tags = var.tags\n private_connection_resource_id = module.openai.id\n is_manual_connection = false\n subresource_name = \"account\"\n private_dns_zone_group_name = \"OpenAiPrivateDnsZoneGroup\"\n private_dns_zone_group_ids = [module.openai_private_dns_zone.id]\n} \n \n Below you can read the code of the  private_dns_zone  and  private_endpoint  modules used, respectively, to create the Azure Private DNS Zones and Azure Private Endpoints.
\n   \n   \n resource \"azurerm_private_dns_zone\" \"private_dns_zone\" {\n name = var.name\n resource_group_name = var.resource_group_name\n tags = var.tags\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_private_dns_zone_virtual_network_link\" \"link\" {\n for_each = var.virtual_networks_to_link\n\n name = \"link_to_${lower(basename(each.key))}\"\n resource_group_name = var.resource_group_name\n private_dns_zone_name = azurerm_private_dns_zone.private_dns_zone.name\n virtual_network_id = \"/subscriptions/${each.value.subscription_id}/resourceGroups/${each.value.resource_group_name}/providers/Microsoft.Network/virtualNetworks/${each.key}\"\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n} \n   \n   \n \n resource \"azurerm_private_endpoint\" \"private_endpoint\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n subnet_id = var.subnet_id\n tags = var.tags\n\n private_service_connection {\n name = \"${var.name}Connection\"\n private_connection_resource_id = var.private_connection_resource_id\n is_manual_connection = var.is_manual_connection\n subresource_names = try([var.subresource_name], null)\n request_message = try(var.request_message, null)\n }\n\n private_dns_zone_group {\n name = var.private_dns_zone_group_name\n private_dns_zone_ids = var.private_dns_zone_group_ids\n }\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n} \n \n Workload User-Defined Managed Identity \n The following code snippet from the  main.tf  Terraform module creates the user-defined managed identity used by the chatbot to acquire a security token from Microsoft Entra ID via Microsoft Entra Workload ID. \n \n resource \"azurerm_user_assigned_identity\" \"aks_workload_identity\" {\n name = var.name_prefix == null ? \"${random_string.prefix.result}${var.workload_managed_identity_name}\" : \"${var.name_prefix}${var.workload_managed_identity_name}\"\n resource_group_name = azurerm_resource_group.rg.name\n location = var.location\n tags = var.tags\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_role_assignment\" \"cognitive_services_user_assignment\" {\n scope = module.openai.id\n role_definition_name = \"Cognitive Services User\"\n principal_id = azurerm_user_assigned_identity.aks_workload_identity.principal_id\n skip_service_principal_aad_check = true\n}\n\nresource \"azurerm_federated_identity_credential\" \"federated_identity_credential\" {\n name = \"${title(var.namespace)}FederatedIdentity\"\n resource_group_name = azurerm_resource_group.rg.name\n audience = [\"api://AzureADTokenExchange\"]\n issuer = module.aks_cluster.oidc_issuer_url\n parent_id = azurerm_user_assigned_identity.aks_workload_identity.id\n subject = \"system:serviceaccount:${var.namespace}:${var.service_account_name}\"\n} \n \n The above code snippet performs the following steps: \n   \n \n Creates a new user-defined managed identity. \n Assign the new managed identity to the Cognitive Services User role with the resource group as a scope. \n Federate the managed identity with the service account used by the chatbot. The following information is necessary to create the federated identity credentials:\n \n The Kubernetes service account name. \n The Kubernetes namespace that will host the chatbot application. 
\n The URL of the OpenID Connect (OIDC) token issuer endpoint for Microsoft Entra Workload ID \n \n \n \n   \n For more information, see the following resources: \n   \n \n How to configure Azure OpenAI Service with managed identities \n Use Microsoft Entra Workload ID with Azure Kubernetes Service (AKS) \n \n   \n Validate the deployment \n Open the Azure Portal, and navigate to the resource group. Open the Azure OpenAI Service resource, navigate to  Keys and Endpoint , and check that the endpoint contains a custom subdomain rather than the regional Cognitive Services endpoint. \n \n   \n Open the  <Prefix>WorkloadManagedIdentity  managed identity, navigate to  Federated credentials , and verify that the federated identity credentials for the  chainlit-sa  service account were created correctly, as shown in the following picture. \n   \n \n Use Microsoft Entra Workload ID with Azure Kubernetes Service (AKS) \n Workloads deployed on an Azure Kubernetes Service (AKS) cluster require Microsoft Entra ID application credentials or managed identities to access Microsoft Entra ID protected resources, such as Azure Key Vault and Microsoft Graph. Microsoft Entra Workload ID integrates with the capabilities native to Kubernetes to federate with external identity providers. \n Microsoft Entra Workload ID uses Service Account Token Volume Projection to enable pods to use a Kubernetes service account. When enabled, the AKS OIDC Issuer issues a service account security token to a workload and OIDC federation enables the application to access Azure resources securely with Microsoft Entra ID based on annotated service accounts. \n Microsoft Entra Workload ID works well with the Azure Identity client libraries and the Microsoft Authentication Library (MSAL) collection if you use a registered application instead of a managed identity. Your workload can use any of these libraries to seamlessly authenticate and access Azure cloud resources. For more information, see the following resources: \n   \n \n Azure Workload Identity open-source project \n Use a Microsoft Entra Workload ID on Azure Kubernetes Service (AKS) \n Deploy and configure workload identity on an Azure Kubernetes Service (AKS) cluster \n Modernize application authentication with workload identity sidecar \n Tutorial: Use a workload identity with an application on Azure Kubernetes Service (AKS) \n Workload identity federation \n Use Microsoft Entra Workload ID for Kubernetes with a User-Assigned Managed Identity \n Use Microsoft Entra Workload ID for Kubernetes with a Microsoft Entra ID registered application \n Azure Managed Identities with Workload Identity Federation \n Microsoft Entra Workload ID federation with Kubernetes \n Microsoft Entra ID Workload Identity Federation with external OIDC Identity Providers \n Minimal Microsoft Entra Workload ID federation \n \n   \n Kubernetes Terraform module \n This sample makes use of a  kubernetes  Terraform module that uses the Helm, Kubectl, and Kubernetes Terraform Providers to install packages and create Kubernetes entities: \n   \n \n NGINX Ingress Controller \n Cert-Manager \n Prometheus: the AKS cluster is configured to send metrics to the Azure Monitor workspace and Azure Managed Grafana. Nonetheless, the kube-prometheus-stack Helm chart is used to install Prometheus and Grafana on the AKS cluster. \n Workload namespace and service account: the Kubectl Terraform Provider and Kubernetes Terraform Provider are used to create the namespace and service account used by the chat applications.
\n Azure Monitor ConfigMaps for Azure Monitor managed service for Prometheus \n Cluster Issuer used by the  cert-manager . \n \n   \n The following table contains the Terraform module used to install the NGINX Ingress Controller: \n \n resource \"helm_release\" \"nginx_ingress_controller\" {\n name = \"ingress-nginx\"\n repository = \"https://kubernetes.github.io/ingress-nginx\"\n chart = \"ingress-nginx\"\n namespace = \"ingress-basic\"\n create_namespace = true\n timeout = 600\n\n set {\n name = \"controller.metrics.enabled\"\n value = \"true\"\n }\n\n set {\n name = \"controller.metrics.serviceMonitor.enabled\"\n value = \"true\"\n }\n \n set {\n name = \"controller.metrics.serviceMonitor.additionalLabels.release\"\n value = \"prometheus\"\n }\n\n\n set {\n name = \"controller.service.annotations.service\\\\.beta\\\\.kubernetes\\\\.io/azure-load-balancer-health-probe-request-path\"\n value = \"/healthz\"\n }\n\n set {\n name = \"controller.nodeSelector.kubernetes\\\\.io/os\"\n value = \"linux\"\n }\n\n set {\n name = \"controller.replicaCount\"\n value = \"${var.nginx_replica_count}\"\n }\n\n depends_on = [helm_release.prometheus]\n} \n \n Instead, the following module is used to create the workload namespace and service account: \n   \n   \n resource \"kubernetes_namespace\" \"namespace\" {\n metadata {\n name = \"${var.namespace}\"\n }\n}\n\nresource \"kubectl_manifest\" \"service_account\" {\n yaml_body = <<-EOF\n apiVersion: v1\n kind: ServiceAccount\n metadata:\n annotations:\n azure.workload.identity/client-id: ${var.workload_managed_identity_client_id}\n azure.workload.identity/tenant-id: ${var.tenant_id}\n labels:\n azure.workload.identity/use: \"true\"\n name: ${var.service_account_name}\n namespace: ${var.namespace}\n EOF\n\n depends_on = [kubernetes_namespace.namespace]\n}\n \n   \n   \n In particular, the  kubectl_manifest  resource makes use of variables to set the following service account annotations and labels necessary to Microsoft Entra Workload ID. For more information, see Service account labels and annotations. \n   \n Simple Chat Application \n The Simple Chat Application is a large language model-based chatbot that allows users to submit general-purpose questions to a GPT model, which generates and streams back human-like and engaging conversational responses. The following picture shows the welcome screen of the chat application. \n \n   \n You can modify the welcome screen in markdown by editing the  chainlit.md  file at the project's root. If you do not want a welcome screen, leave the file empty. The following picture shows what happens when a user submits a new message in the chat. \n \n   \n Chainlit can render messages in markdown format as shown by the following prompt: \n \n   \n Chainlit also provides classes to support the following elements: \n   \n \n Audio: The  Audio  class allows you to display an audio player for a specific audio file in the chatbot user interface. You must provide either a URL or a path or content bytes. \n Avatar: The  Avatar  class allows you to display an avatar image next to a message instead of the author's name. You need to send the element once. Next,, if an avatar's name matches an author's name, the avatar will be automatically displayed. You must provide either a URL or a path or content bytes. \n File: The  File  class allows you to display a button that lets users download the content of the file. You must provide either a URL or a path or content bytes. 
\n Image: The  Image  class is designed to create and handle image elements to be sent and displayed in the chatbot user interface. You must provide either a URL or a path or content bytes. \n Pdf: The  Pdf  class allows you to display a PDF hosted remotely or locally in the chatbot UI. This class either takes a URL of a PDF hosted online or the path of a local PDF. \n Pyplot: The  Pyplot  class allows you to display a Matplotlib pyplot chart in the chatbot UI. This class takes a pyplot figure. \n TaskList: The  TaskList  class allows you to display a task list next to the chatbot UI. \n Text: The  Text  class allows you to display a text element in the chatbot UI. This class takes a string and creates a text element that can be sent to the UI. It supports the markdown syntax for formatting text. You must provide either a URL or a path or content bytes. \n \n   \n You can click the user icon on the UI to access the chat settings and choose, for example, between the light and dark theme. \n \n   \n The application is built in Python. Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules. \n   \n   \n # Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\") \n   \n   \n These are the libraries used by the chat application: \n   \n \n os : This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc. \n sys : This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter. \n openai : The OpenAI Python library provides convenient access to the OpenAI API from applications written in Python. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook. \n logging : This module provides flexible logging of messages. \n chainlit as cl : This imports the Chainlit library and aliases it as  cl . Chainlit is used to create the UI of the application. \n from azure.identity import DefaultAzureCredential, get_bearer_token_provider : when the  openai_type  property value is  azure_ad,  a  DefaultAzureCredential  object from the Azure Identity client library for Python is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity federated with the service account. \n load_dotenv  and  dotenv_values  from  dotenv : Python-dotenv reads key-value pairs from a  .env  file and can set them as environment variables. It helps in the development of applications following the 12-factor principles. \n \n   \n The  requirements.txt  file under the  src  folder contains the list of packages used by the chat applications. 
You can restore these packages in your environment using the following command: \n \n pip install -r requirements.txt --upgrade \n \n Next, the code reads the value of the environment variables used to initialize Azure OpenAI objects. In addition, it creates a token provider for Azure OpenAI. \n \n # Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n) \n \n Here's a brief explanation of each variable and related environment variable: \n   \n \n temperature : A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9. \n api_base : The base URL for the OpenAI API. \n api_key : The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI. \n api_type : A string representing the type of the OpenAI API. \n api_version : A string representing the version of the OpenAI API. \n engine : The engine used for OpenAI API calls. \n model : The model used for OpenAI API calls. \n system_content : The content of the system message used for OpenAI API calls. \n max_retries : The maximum number of retries for OpenAI API calls. \n timeout : The timeout in seconds. \n debug : When debug is equal to  true ,  t , or  1 , the logger writes the chat completion answers. \n \n   \n In the next section, the code creates the  AsyncAzureOpenAI  client object used by the application to communicate with the Azure OpenAI Service instance. When the  api_type  is equal to  azure , the code initializes the object with the API key. Otherwise, it initializes the  azure_ad_token_provider  property to the token provider created earlier. Then the code creates a logger. \n \n # Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__) \n \n The backoff time is calculated using the  backoff_in_seconds  and  attempt  variables. It follows the formula  backoff_in_seconds * 2 ** attempt + random.uniform(0, 1) . This formula increases the backoff time exponentially with each attempt and adds a random value between 0 and 1 to avoid synchronized retries. 
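The backoff variables mentioned above do not appear in the snippet itself; as a purely illustrative sketch, an exponential backoff with jitter along those lines could be implemented as follows. The  call_with_backoff  helper and its parameters are hypothetical and are not part of the sample application, which relies on the  max_retries  parameter of the  AsyncAzureOpenAI  client instead.

import asyncio
import random

async def call_with_backoff(make_call, max_retries=5, backoff_in_seconds=1):
    # Hypothetical helper: retry an awaitable call with exponential backoff and jitter.
    for attempt in range(max_retries):
        try:
            return await make_call()
        except Exception:
            if attempt == max_retries - 1:
                raise
            # backoff_in_seconds * 2 ** attempt grows exponentially with each attempt,
            # while random.uniform(0, 1) adds jitter to avoid synchronized retries.
            delay = backoff_in_seconds * 2 ** attempt + random.uniform(0, 1)
            await asyncio.sleep(delay)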
\n Next, the code defines a function called  start_chat  that is used to initialize the UI when the user connects to the application or clicks the  New Chat  button. \n   \n   \n   \n @cl.on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"User\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n \n   \n   \n Here is a brief explanation of the function steps: \n   \n \n cl.on_chat_start : The on_chat_start decorator registers a callback function  start_chat()  to be called when the Chainlit chat starts. It is used to set up the chat and send avatars for the Chatbot, Error, and User participants in the chat. \n cl.Avatar() : the Avatar class allows you to display an avatar image next to a message instead of the author name. You need to send the element once. Next, if the name of an avatar matches the name of an author, the avatar will be automatically displayed. You must provide either a URL or a path or content bytes. \n cl.user_session.set() : This API call sets a value in the user_session dictionary. In this case, it initializes the  message_history  in the user's session with a system content message, which indicates the start of the chat. \n \n Finally, the application defines the method called whenever the user sends a new message in the chat. \n \n @cl.on_message\nasync def on_message(message: cl.Message):\n message_history = cl.user_session.get(\"message_history\")\n message_history.append({\"role\": \"user\", \"content\": message.content})\n logger.info(\"Question: [%s]\", message.content)\n\n # Create the Chainlit response message\n msg = cl.Message(content=\"\")\n\n async for stream_resp in await openai.chat.completions.create(\n model=model,\n messages=message_history,\n temperature=temperature,\n stream=True,\n ):\n if stream_resp and len(stream_resp.choices) > 0:\n token = stream_resp.choices[0].delta.content or \"\"\n await msg.stream_token(token)\n\n if debug:\n logger.info(\"Answer: [%s]\", msg.content)\n\n message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n await msg.send() \n \n Here is a detailed explanation of the function steps: \n   \n \n cl.on_message : The on_message decorator registers a callback function  on_message(message: cl.Message)  to be called when the user submits a new message in the chat. It is the main function responsible for handling the chat logic. \n cl.user_session.get() : This API call retrieves a value from the user's session data stored in the user_session dictionary. In this case, it fetches the  message_history  from the user's session to maintain the chat history. \n message_history.append() : This API call appends a new message to the  message_history  list. It is used to add the user's message and the assistant's response to the chat history. \n cl.Message() : This API call creates a Chainlit Message object. The  Message  class is designed to send, stream, edit, or remove messages in the chatbot user interface. In this sample, the  Message  object is used to stream the OpenAI response in the chat. \n msg.stream_token() : The stream_token method of the Message class streams a token to the response message.
It is used to send the response from the OpenAI Chat API in chunks to ensure real-time streaming in the chat. \n await openai.chat.completions.create() : This API call sends a message to the OpenAI Chat API in an asynchronous mode and streams the response. It uses the provided  message_history  as context for generating the assistant's response. \n \n Below, you can read the complete code of the application. \n \n # Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n)\n\n# Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\n@cl.on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n\n\n@cl.on_message\nasync def on_message(message: cl.Message):\n message_history = cl.user_session.get(\"message_history\")\n message_history.append({\"role\": \"user\", \"content\": message.content})\n logger.info(\"Question: [%s]\", message.content)\n\n # Create the Chainlit response message\n msg = cl.Message(content=\"\")\n\n async for stream_resp in await openai.chat.completions.create(\n model=model,\n messages=message_history,\n temperature=temperature,\n stream=True,\n ):\n if stream_resp and len(stream_resp.choices) > 0:\n token = stream_resp.choices[0].delta.content or \"\"\n await msg.stream_token(token)\n\n if debug:\n logger.info(\"Answer: [%s]\", msg.content)\n\n message_history.append({\"role\": \"assistant\", \"content\": 
msg.content})\n await msg.send() \n \n You can run the application locally using the following command. The  -w  flag enables auto-reload whenever we make live changes to the application code. \n \n chainlit run app.py -w \n \n Documents QA Chat \n The Documents QA Chat application allows users to submit up to 10  .pdf  and  .docx  documents. The application processes the uploaded documents to create vector embeddings. These embeddings are stored in a ChromaDB vector database for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response. The following picture shows the chat application interface. As you can see, you can click the  Browse  button and choose up to 10  .pdf  and  .docx  documents to upload. Alternatively, you can just drag and drop the files over the control area. \n \n   \n After uploading the documents, the application creates embeddings and stores them in the ChromaDB vector database. During this phase, the UI shows a message  Processing <file-1>, <file-2>... , as shown in the following picture: \n \n   \n When the code has finished creating the embeddings, the UI is ready to receive the user's questions: \n \n   \n As your chat application grows in complexity, understanding the individual steps for generating a specific answer can become challenging. To solve this issue, Chainlit allows you to easily explore the reasoning process right from the user interface using the Chain of Thought. If you are using the LangChain integration, every intermediary step is automatically sent and displayed in the Chainlit UI by clicking and expanding the steps, as shown in the following picture: \n \n   \n To see the text chunks that were used by the large language model to generate the response, you can click the source links, as shown in the following picture: \n \n   \n In the Chain of Thought, below the step used to invoke the OpenAI chat completion API, you can find an  Inspect in prompt playground  icon. Clicking it opens the Prompt Playground dialog, which allows you to modify and iterate on the prompt as needed. \n \n   \n As shown in the following picture, you can click and edit the value of the highlighted variables in the user prompt: \n \n   \n You can then click and edit the user question. \n \n   \n Then, you can click the submit button to test the effect of your changes, as shown in the following picture. \n \n   \n Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules.
\n \n # Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\") \n \n These are the libraries used by the chat application: \n   \n \n os : This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc. \n sys : This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter. \n time : This module provides various time-related functions for time manipulation and measurement. \n openai : the OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses, which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook. \n logging : This module provides flexible logging of messages. \n chainlit as cl : This imports the Chainlit library and aliases it as  cl.  Chainlit is used to create the UI of the application. \n AzureChatOpenAI  from  chainlit.playground.config import : you need to import  AzureChatOpenAI  from  chainlit.playground.config  to use the Chainlit Playground. \n DefaultAzureCredential  from  azure.identity : when the  openai_type  property value is  azure_ad , a  DefaultAzureCredential  object from the Azure Identity client library for Python - version 1.13.0 is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity, whose client ID is defined in the  AZURE_CLIENT_ID  environment variable. \n load_dotenv  and  dotenv_values  from  dotenv : Python-dotenv reads key-value pairs from a  .env  file and can set them as environment variables. It helps in the development of applications following the 12-factor principles. \n langchain : Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge. LangChain library aims to assist in the development of those types of applications. \n \n The  requirements.txt  file under the  src  folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command: \n \n pip install -r requirements.txt --upgrade \n \n Next, the code reads environment variables and configures the OpenAI settings. 
\n \n # Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n\\`\\`\\`\nThe answer is foo\nSOURCES: xyz\n\\`\\`\\`\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n SystemMessagePromptTemplate.from_template(system_template),\n HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = chat_completion_deployment \n \n Here's a brief explanation of each variable and related environment variable: \n   \n \n temperature : A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9. \n api_base : The base URL for the OpenAI API. \n api_key : The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI. \n api_type : A string representing the type of the OpenAI API. \n api_version : A string representing the version of the OpenAI API. \n chat_completion_deployment : the name of the Azure OpenAI GPT model for chat completion. \n embeddings_deployment : the name of the Azure OpenAI deployment for embeddings. \n model : The model used for chat completion calls (e.g,  gpt-35-turbo-16k ). 
\n max_size_mb : the maximum size for the uploaded documents. \n max_files : the maximum number of documents that can be uploaded. \n text_splitter_chunk_size : the maximum chunk size used by the  RecursiveCharacterTextSplitter  object. \n text_splitter_chunk_overlap : the maximum chunk overlap used by the  RecursiveCharacterTextSplitter  object. \n embeddings_chunk_size : the maximum chunk size used by the  OpenAIEmbeddings  object. \n max_retries : The maximum number of retries for OpenAI API calls. \n retry_min_seconds : the minimum number of seconds before a retry. \n retry_max_seconds : the maximum number of seconds before a retry. \n timeout : The timeout in seconds. \n system_template : The content of the system message used for OpenAI API calls. \n debug : When debug is equal to  true ,  t , or  1 , the logger switches to verbose mode. \n \n   \n Next, the code defines a function called  start_chat  that is used to initialize the UI when the user connects to the application or clicks the  New Chat  button. \n \n @cl.on_chat_start\nasync def start_chat():\n # Sending Avatars for Chat Participants\n await cl.Avatar(\n name=\"Chatbot\",\n url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\",\n url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\"\n ).send() \n \n Here is a brief explanation of the function steps: \n   \n \n cl.on_chat_start : The on_chat_start decorator registers a callback function  start_chat()  to be called when the Chainlit chat starts. It is used to set up the chat and send avatars for the Chatbot, Error, and User participants in the chat. \n cl.Avatar() : the Avatar class allows you to display an avatar image next to a message instead of the author name. You need to send the element once. Next, if the name of an avatar matches the name of an author, the avatar will be automatically displayed. You must provide either a URL or a path or content bytes. \n \n   \n The following code is used to initialize the large language model (LLM) chain used to reply to questions on the content of the uploaded documents. \n \n # Initialize the file list to None\n files = None\n\n # Wait for the user to upload a file\n while files == None:\n files = await cl.AskFileMessage(\n content=f\"Please upload up to {max_files} `.pdf` or `.docx` files to begin.\",\n accept=[\n \"application/pdf\",\n \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n ],\n max_size_mb=max_size_mb,\n max_files=max_files,\n timeout=86400,\n raise_on_timeout=False,\n ).send() \n \n The AskFileMessage API call prompts the user to upload up to a specified number of  .pdf  or  .docx  files. The uploaded files are stored in the  files  variable. The process continues until the user uploads files. For more information, see AskFileMessage. \n The following code processes each uploaded file by extracting its content. \n   \n \n The text content of each file is stored in the list  all_texts . \n This code performs text processing and chunking. It checks the file extension to read the file content accordingly, depending on whether it's a  .pdf  or a  .docx  document. \n The text content is split into smaller chunks using the RecursiveCharacterTextSplitter LangChain object. \n Metadata is created for each chunk and stored in the  metadatas  list.
\n \n \n # Create a message to inform the user that the files are being processed\n content = \"\"\n if len(files) == 1:\n content = f\"Processing `{files[0].name}`...\"\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"Processing {', '.join(files_names)}...\"\n logger.info(content)\n msg = cl.Message(content=content, author=\"Chatbot\")\n await msg.send()\n\n # Create a list to store the texts of each file\n all_texts = []\n\n # Process each file uploaded by the user\n for file in files:\n # Read file contents\n with open(file.path, \"rb\") as uploaded_file:\n file_contents = uploaded_file.read()\n\n logger.info(\"[%d] bytes were read from %s\", len(file_contents), file.path)\n\n # Create an in-memory buffer from the file content\n bytes = io.BytesIO(file_contents)\n\n # Get file extension\n extension = file.name.split(\".\")[-1]\n\n # Initialize the text variable\n text = \"\"\n\n # Read the file\n if extension == \"pdf\":\n reader = PdfReader(bytes)\n for i in range(len(reader.pages)):\n text += reader.pages[i].extract_text()\n if debug:\n logger.info(\"[%s] read from %s\", text, file.path)\n elif extension == \"docx\":\n doc = Document(bytes)\n paragraph_list = []\n for paragraph in doc.paragraphs:\n paragraph_list.append(paragraph.text)\n if debug:\n logger.info(\"[%s] read from %s\", paragraph.text, file.path)\n text = \"\\n\".join(paragraph_list)\n\n # Split the text into chunks\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size=text_splitter_chunk_size,\n chunk_overlap=text_splitter_chunk_overlap,\n )\n texts = text_splitter.split_text(text)\n\n # Add the chunks and metadata to the list\n all_texts.extend(texts)\n\n # Create a metadata for each chunk\n metadatas = [{\"source\": f\"{i}-pl\"} for i in range(len(all_texts))] \n \n The next piece of code performs the following steps: \n   \n \n It creates an AzureOpenAIEmbeddings object configured to use the embeddings model in the Azure OpenAI Service to create embeddings from text chunks. \n It creates a ChromaDB vector database using the  AzureOpenAIEmbeddings  object, the text chunks list, and the metadata list. \n It creates an AzureChatOpenAI LangChain object based on the GPT model hosted in Azure OpenAI Service. \n It creates a chain using the RetrievalQAWithSourcesChain.from_chain_type API call, which uses the previously created LLM and the Chroma vector store as a retriever. \n It stores the metadata and text chunks in the user session using the  cl.user_session.set()  API call. \n It creates a message to inform the user that the files are ready for queries, and finally returns the  chain . \n The  cl.user_session.set(\"chain\", chain)  call stores the LLM chain in the user_session dictionary for later use. \n \n The next section creates the LangChain LLM chain.
\n \n # Create a Chroma vector store\n if api_type == \"azure\":\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n else:\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n\n # Create a Chroma vector store\n db = await cl.make_async(Chroma.from_texts)(\n all_texts, embeddings, metadatas=metadatas\n )\n\n # Create an AzureChatOpenAI llm\n if api_type == \"azure\":\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n else:\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n azure_endpoint=api_base,\n api_key=api_key,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n azure_ad_token_provider=token_provider,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n # Create a chain that uses the Chroma vector store\n chain = RetrievalQAWithSourcesChain.from_chain_type(\n llm=llm,\n chain_type=\"stuff\",\n retriever=db.as_retriever(),\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs,\n )\n\n # Save the metadata and texts in the user session\n cl.user_session.set(\"metadatas\", metadatas)\n cl.user_session.set(\"texts\", all_texts)\n\n # Create a message to inform the user that the files are ready for queries\n content = \"\"\n if len(files) == 1:\n content = f\"`{files[0].name}` processed. You can now ask questions!\"\n logger.info(content)\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"{', '.join(files_names)} processed. You can now ask questions.\"\n logger.info(content)\n msg.content = content\n msg.author = \"Chatbot\"\n await msg.update()\n\n # Store the chain in the user session\n cl.user_session.set(\"chain\", chain) \n \n The following code handles the communication with the OpenAI API and incorporates retrying logic in case the API calls fail due to specific errors. \n   \n \n cl.on_message : The on_message decorator registers a callback function  main(message: str)  to be called when the user submits a new message in the chat. It is the main function responsible for handling the chat logic. \n cl.user_session.get(\"chain\") : this call retrieves the LLM chain from the user_session dictionary. \n cl.AsyncLangchainCallbackHandler : this call creates a LangChain callback handler. \n await chain.acall : The asynchronous call to the RetrievalQAWithSourcesChain.acall executes the LLM chain with the user message as an input. 
\n \n \n @cl.on_message\nasync def main(message: cl.Message):\n # Retrieve the chain from the user session\n chain = cl.user_session.get(\"chain\")\n\n # Create a callback handler\n cb = cl.AsyncLangchainCallbackHandler()\n\n # Get the response from the chain\n response = await chain.acall(message.content, callbacks=[cb])\n logger.info(\"Question: [%s]\", message.content) \n \n The code below extracts the answers and sources from the API response and formats them to be sent as a message. \n \n The  answer  and  sources  are obtained from the  response  dictionary. \n The sources are then processed to find corresponding texts in the user session metadata ( metadatas ) and create  source_elements  using  cl.Text() . \n cl.Message().send() : the Message API creates and displays a message containing the answer and sources, if available. \n The last command sets the  AZURE_OPENAI_API_KEY  environment variable to a security key to access Azure OpenAI returned by the token provider. This key is used by the Chainlit playground. \n \n   \n   \n # Get the answer and sources from the response\n answer = response[\"answer\"]\n sources = response[\"sources\"].strip()\n source_elements = []\n\n if debug:\n logger.info(\"Answer: [%s]\", answer)\n\n # Get the metadata and texts from the user session\n metadatas = cl.user_session.get(\"metadatas\")\n all_sources = [m[\"source\"] for m in metadatas]\n texts = cl.user_session.get(\"texts\")\n\n if sources:\n found_sources = []\n\n # Add the sources to the message\n for source in sources.split(\",\"):\n source_name = source.strip().replace(\".\", \"\")\n # Get the index of the source\n try:\n index = all_sources.index(source_name)\n except ValueError:\n continue\n text = texts[index]\n found_sources.append(source_name)\n # Create the text element referenced in the message\n source_elements.append(cl.Text(content=text, name=source_name))\n\n if found_sources:\n answer += f\"\\nSources: {', '.join(found_sources)}\"\n else:\n answer += \"\\nNo sources found\"\n\n await cl.Message(content=answer, elements=source_elements).send()\n\n # Setting the AZURE_OPENAI_API_KEY environment variable for the playground\n if api_type == \"azure_ad\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = token_provider()\n \n   \n   \n Below, you can read the complete code of the application. 
\n \n # Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n```\nThe answer is foo\nSOURCES: xyz\n```\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n SystemMessagePromptTemplate.from_template(system_template),\n HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = 
chat_completion_deployment


@cl.on_chat_start
async def start():
    await cl.Avatar(
        name="Chatbot", url="https://cdn-icons-png.flaticon.com/512/8649/8649595.png"
    ).send()
    await cl.Avatar(
        name="Error", url="https://cdn-icons-png.flaticon.com/512/8649/8649595.png"
    ).send()
    await cl.Avatar(
        name="You",
        url="https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png",
    ).send()

    # Initialize the file list to None
    files = None

    # Wait for the user to upload a file
    while files == None:
        files = await cl.AskFileMessage(
            content=f"Please upload up to {max_files} `.pdf` or `.docx` files to begin.",
            accept=[
                "application/pdf",
                "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            ],
            max_size_mb=max_size_mb,
            max_files=max_files,
            timeout=86400,
            raise_on_timeout=False,
        ).send()

    # Create a message to inform the user that the files are being processed
    content = ""
    if len(files) == 1:
        content = f"Processing `{files[0].name}`..."
    else:
        files_names = [f"`{f.name}`" for f in files]
        content = f"Processing {', '.join(files_names)}..."
    logger.info(content)
    msg = cl.Message(content=content, author="Chatbot")
    await msg.send()

    # Create a list to store the texts of each file
    all_texts = []

    # Process each file uploaded by the user
    for file in files:
        # Read file contents
        with open(file.path, "rb") as uploaded_file:
            file_contents = uploaded_file.read()

        logger.info("[%d] bytes were read from %s", len(file_contents), file.path)

        # Create an in-memory buffer from the file content
        bytes = io.BytesIO(file_contents)

        # Get file extension
        extension = file.name.split(".")[-1]

        # Initialize the text variable
        text = ""

        # Read the file
        if extension == "pdf":
            reader = PdfReader(bytes)
            for i in range(len(reader.pages)):
                text += reader.pages[i].extract_text()
                if debug:
                    logger.info("[%s] read from %s", text, file.path)
        elif extension == "docx":
            doc = Document(bytes)
            paragraph_list = []
            for paragraph in doc.paragraphs:
                paragraph_list.append(paragraph.text)
                if debug:
                    logger.info("[%s] read from %s", paragraph.text, file.path)
            text = "\n".join(paragraph_list)

        # Split the text into chunks
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=text_splitter_chunk_size,
            chunk_overlap=text_splitter_chunk_overlap,
        )
        texts = text_splitter.split_text(text)

        # Add the chunks and metadata to the list
        all_texts.extend(texts)

    # Create a metadata for each chunk
    metadatas = [{"source": f"{i}-pl"} for i in range(len(all_texts))]

    # Create the embeddings object: use an API key or a Microsoft Entra ID token provider
    # depending on the authentication mode
    if api_type == "azure":
        embeddings = AzureOpenAIEmbeddings(
            openai_api_version=api_version,
            openai_api_type=api_type,
            openai_api_key=api_key,
            azure_endpoint=api_base,
            azure_deployment=embeddings_deployment,
            max_retries=max_retries,
            retry_min_seconds=retry_min_seconds,
            retry_max_seconds=retry_max_seconds,
            chunk_size=embeddings_chunk_size,
            timeout=timeout,
        )
    else:
        embeddings = AzureOpenAIEmbeddings(
            openai_api_version=api_version,
            openai_api_type=api_type,
            azure_endpoint=api_base,
            azure_ad_token_provider=token_provider,
            azure_deployment=embeddings_deployment,
            max_retries=max_retries,
            retry_min_seconds=retry_min_seconds,
            retry_max_seconds=retry_max_seconds,
            chunk_size=embeddings_chunk_size,
            timeout=timeout,
        )

    # Create a Chroma vector store
    db = await cl.make_async(Chroma.from_texts)(
        all_texts, embeddings, metadatas=metadatas
    )

    # Create an AzureChatOpenAI llm
    if api_type == "azure":
        llm = AzureChatOpenAI(
            openai_api_type=api_type,
            openai_api_version=api_version,
            openai_api_key=api_key,
            azure_endpoint=api_base,
            temperature=temperature,
            azure_deployment=chat_completion_deployment,
            streaming=True,
            max_retries=max_retries,
            timeout=timeout,
        )
    else:
        llm = AzureChatOpenAI(
            openai_api_type=api_type,
            openai_api_version=api_version,
            azure_endpoint=api_base,
            api_key=api_key,
            temperature=temperature,
            azure_deployment=chat_completion_deployment,
            azure_ad_token_provider=token_provider,
            streaming=True,
            max_retries=max_retries,
            timeout=timeout,
        )

    # Create a chain that uses the Chroma vector store
    chain = RetrievalQAWithSourcesChain.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(),
        return_source_documents=True,
        chain_type_kwargs=chain_type_kwargs,
    )

    # Save the metadata and texts in the user session
    cl.user_session.set("metadatas", metadatas)
    cl.user_session.set("texts", all_texts)

    # Create a message to inform the user that the files are ready for queries
    content = ""
    if len(files) == 1:
        content = f"`{files[0].name}` processed. You can now ask questions!"
        logger.info(content)
    else:
        files_names = [f"`{f.name}`" for f in files]
        content = f"{', '.join(files_names)} processed. You can now ask questions."
        logger.info(content)
    msg.content = content
    msg.author = "Chatbot"
    await msg.update()

    # Store the chain in the user session
    cl.user_session.set("chain", chain)


@cl.on_message
async def main(message: cl.Message):
    # Retrieve the chain from the user session
    chain = cl.user_session.get("chain")

    # Create a callback handler
    cb = cl.AsyncLangchainCallbackHandler()

    # Get the response from the chain
    response = await chain.acall(message.content, callbacks=[cb])
    logger.info("Question: [%s]", message.content)

    # Get the answer and sources from the response
    answer = response["answer"]
    sources = response["sources"].strip()
    source_elements = []

    if debug:
        logger.info("Answer: [%s]", answer)

    # Get the metadata and texts from the user session
    metadatas = cl.user_session.get("metadatas")
    all_sources = [m["source"] for m in metadatas]
    texts = cl.user_session.get("texts")

    if sources:
        found_sources = []

        # Add the sources to the message
        for source in sources.split(","):
            source_name = source.strip().replace(".", "")
            # Get the index of the source
            try:
                index = all_sources.index(source_name)
            except ValueError:
                continue
            text = texts[index]
            found_sources.append(source_name)
            # Create the text element referenced in the message
            source_elements.append(cl.Text(content=text, name=source_name))

        if found_sources:
            answer += f"\nSources: {', '.join(found_sources)}"
        else:
            answer += "\nNo sources found"

    await cl.Message(content=answer, elements=source_elements).send()

    # Set the AZURE_OPENAI_API_KEY environment variable for the playground
    if api_type == "azure_ad":
        os.environ["AZURE_OPENAI_API_KEY"] = token_provider()

You can run the application locally using the following command. The -w flag enables auto-reload, so the app restarts whenever you change the application code.
chainlit run app.py -w

Build Docker Images

You can use the src/01-build-docker-images.sh Bash script to build the Docker container image for each container app.

#!/bin/bash

# Variables
source ./00-variables.sh

# Use a for loop to build the docker images using the array index
for index in ${!images[@]}; do
  # Build the docker image
  docker build -t ${images[$index]}:$tag -f Dockerfile --build-arg FILENAME=${filenames[$index]} --build-arg PORT=$port .
done

Before running any script in the src folder, make sure to customize the value of the variables inside the 00-variables.sh file located in the same folder. This file is sourced by all the scripts and contains the following variables:

# Variables

# Azure Container Registry
prefix="Contoso"
acrName="${prefix}Registry"
acrResourceGrougName="${prefix}RG"
location="EastUS"

# Python Files
docAppFile="doc.py"
chatAppFile="chat.py"

# Docker Images
docImageName="doc"
chatImageName="chat"
tag="v1"
port="8000"

# Arrays
images=($docImageName $chatImageName)
filenames=($docAppFile $chatAppFile)

The Dockerfile under the src folder is parametric and can be used to build the container images for both chat applications.

# app/Dockerfile

# Stage 1 - Install build dependencies

# A Dockerfile must start with a FROM instruction, which sets the base image for the container.
# The Python images come in many flavors, each designed for a specific use case.
# The python:3.11-slim image is a minimal image built on top of Debian Linux that includes
# only the packages needed to run Python, which makes it a good base image for most applications.
# For more information, see:
# * https://hub.docker.com/_/python
# * https://docs.streamlit.io/knowledge-base/tutorials/deploy/docker
FROM python:3.11-slim AS builder

# The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY, and ADD instructions that follow it in the Dockerfile.
# If the WORKDIR doesn't exist, it will be created even if it's not used in any subsequent Dockerfile instruction.
# For more information, see: https://docs.docker.com/engine/reference/builder/#workdir
WORKDIR /app

# Set environment variables.
# The ENV instruction sets the environment variable <key> to the value <value>.
# This value will be in the environment of all "descendant" Dockerfile commands and can be replaced inline in many as well.
# For more information, see: https://docs.docker.com/engine/reference/builder/#env
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1

# Install git so that we can clone the app code from a remote repo using the RUN instruction.
# The RUN command has two forms:
# * RUN <command> (shell form, the command is run in a shell, which by default is /bin/sh -c on Linux or cmd /S /C on Windows)
# * RUN ["executable", "param1", "param2"] (exec form)
# The RUN instruction will execute any commands in a new layer on top of the current image and commit the results.
# The resulting committed image will be used for the next step in the Dockerfile.
# For more information, see: https://docs.docker.com/engine/reference/builder/#run
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    software-properties-common \
    git \
    && rm -rf /var/lib/apt/lists/*

# Create a virtualenv to keep dependencies together
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Copy the requirements.txt file, which contains the app dependencies, to WORKDIR.
# COPY has two forms:
# * COPY <src> <dest> (this copies the files from the local machine to the container's own filesystem)
# * COPY ["<src>",... "<dest>"] (this form is required for paths containing whitespace)
# For more information, see: https://docs.docker.com/engine/reference/builder/#copy
COPY requirements.txt .

# Install the Python dependencies
RUN pip install --no-cache-dir --no-deps -r requirements.txt

# Stage 2 - Copy only necessary files to the runner stage

# The FROM instruction initializes a new build stage for the application
FROM python:3.11-slim

# Define the filename to copy as an argument
ARG FILENAME

# Define the port to run the application on as an argument
ARG PORT=8000

# Set an environment variable
ENV FILENAME=${FILENAME}

# Set the working directory to /app
WORKDIR /app

# Copy the virtual environment from the builder stage
COPY --from=builder /opt/venv /opt/venv

# Set environment variables
ENV PATH="/opt/venv/bin:$PATH"

# Copy the $FILENAME file containing the application code
COPY $FILENAME .

# Copy the chainlit.md file to the working directory
COPY chainlit.md .

# Copy the .chainlit folder to the working directory
COPY ./.chainlit ./.chainlit

# The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime.
# For more information, see: https://docs.docker.com/engine/reference/builder/#expose
EXPOSE $PORT

# The ENTRYPOINT instruction has two forms:
# * ENTRYPOINT ["executable", "param1", "param2"] (exec form, preferred)
# * ENTRYPOINT command param1 param2 (shell form)
# The ENTRYPOINT instruction allows you to configure a container that will run as an executable.
# For more information, see: https://docs.docker.com/engine/reference/builder/#entrypoint
CMD chainlit run $FILENAME --port=$PORT

Test applications locally

You can use the src/02-run-docker-container.sh Bash script to test the containers for the docs and chat applications locally.
#!/bin/bash

# Variables
source ./00-variables.sh

# Print the menu
echo "===================================="
echo "Run Docker Container (1-3): "
echo "===================================="
options=(
  "Docs"
  "Chat"
  "Quit"
)
name=""
# Select an option
COLUMNS=0
select option in "${options[@]}"; do
  case $option in
  "Docs")
    docker run -it \
      --rm \
      -p $port:$port \
      -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \
      -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \
      -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \
      -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \
      -e AZURE_OPENAI_ADA_DEPLOYMENT=$AZURE_OPENAI_ADA_DEPLOYMENT \
      -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \
      -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \
      -e TEMPERATURE=$TEMPERATURE \
      --name $docImageName \
      $docImageName:$tag
    break
    ;;
  "Chat")
    docker run -it \
      --rm \
      -p $port:$port \
      -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \
      -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \
      -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \
      -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \
      -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \
      -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \
      -e TEMPERATURE=$TEMPERATURE \
      --name $chatImageName \
      $chatImageName:$tag
    break
    ;;
  "Quit")
    exit
    ;;
  *) echo "invalid option $REPLY" ;;
  esac
done

Push Docker containers to the Azure Container Registry

You can use the src/03-push-docker-image.sh Bash script to push the Docker container images for the docs and chat applications to the Azure Container Registry (ACR).

#!/bin/bash

# Variables
source ./00-variables.sh

# Login to ACR
az acr login --name $acrName

# Retrieve the ACR login server. Each container image needs to be tagged with the loginServer name of the registry.
loginServer=$(az acr show --name $acrName --query loginServer --output tsv)

# Use a for loop to tag and push the local docker images to the Azure Container Registry
for index in ${!images[@]}; do
  # Tag the local image with the loginServer of ACR
  docker tag ${images[$index],,}:$tag $loginServer/${images[$index],,}:$tag

  # Push the container image to ACR
  docker push $loginServer/${images[$index],,}:$tag
done

Deployment Scripts

If you deployed the Azure infrastructure using the Terraform modules provided with this sample, you only need to deploy the applications using the following scripts and YAML templates in the scripts folder.

Scripts:

- 09-deploy-apps.sh
- 10-configure-dns.sh

YAML manifests:

- configMap.yml
- deployment.yml
- ingress.yml
- service.yml

If you instead deploy the applications to an AKS cluster that you provisioned in another way, make sure to run all of the scripts in order.

The 09-deploy-apps.sh script creates the configmap, deployment, service, and ingress Kubernetes objects for the chat and docs applications. This script uses the yq tool to customize the manifests with the values of the variables defined in the 00-variables.sh file. yq is a lightweight and portable command-line YAML, JSON, and XML processor that uses jq-like syntax and works with YAML, JSON, XML, properties, CSV, and TSV files. It doesn't yet support everything jq does, but it does support the most common operations and functions, and more is being added continuously.
#!/bin/bash

# Variables
source ./00-variables.sh

# Attach ACR to the AKS cluster
if [[ $attachAcr == true ]]; then
  echo "Attaching ACR $acrName to AKS cluster $aksClusterName..."
  az aks update \
    --name $aksClusterName \
    --resource-group $aksResourceGroupName \
    --attach-acr $acrName
fi

# Check if the namespace exists in the cluster
result=$(kubectl get namespace -o jsonpath="{.items[?(@.metadata.name=='$namespace')].metadata.name}")

if [[ -n $result ]]; then
  echo "$namespace namespace already exists in the cluster"
else
  echo "$namespace namespace does not exist in the cluster"
  echo "creating $namespace namespace in the cluster..."
  kubectl create namespace $namespace
fi

# Create the config map
cat $configMapTemplate |
  yq "(.data.TITLE)|="\""$title"\"" |
  yq "(.data.LABEL)|="\""$label"\"" |
  yq "(.data.TEMPERATURE)|="\""$temperature"\"" |
  yq "(.data.IMAGE_WIDTH)|="\""$imageWidth"\"" |
  yq "(.data.AZURE_OPENAI_TYPE)|="\""$openAiType"\"" |
  yq "(.data.AZURE_OPENAI_BASE)|="\""$openAiBase"\"" |
  yq "(.data.AZURE_OPENAI_MODEL)|="\""$openAiModel"\"" |
  yq "(.data.AZURE_OPENAI_DEPLOYMENT)|="\""$openAiDeployment"\"" |
  kubectl apply -n $namespace -f -

# Create the deployment
cat $deploymentTemplate |
  yq "(.spec.template.spec.containers[0].image)|="\""$image"\"" |
  yq "(.spec.template.spec.containers[0].imagePullPolicy)|="\""$imagePullPolicy"\"" |
  yq "(.spec.template.spec.serviceAccountName)|="\""$serviceAccountName"\"" |
  kubectl apply -n $namespace -f -

# Create the service
kubectl apply -f $serviceTemplate -n $namespace

The 10-configure-dns.sh script creates an A record in the Azure Public DNS Zone to expose the chat and docs applications via a given subdomain (e.g., https://chat.contoso.com).
#!/bin/bash

# Variables
source ./00-variables.sh
subdomains=($docsSubdomain $chatSubdomain)

# Install jq if it is not installed
path=$(which jq)

if [[ -z $path ]]; then
  echo 'Installing jq...'
  apt install -y jq
fi

# Choose the ingress controller to use
if [[ $ingressClassName == "nginx" ]]; then
  ingressNamespace=$nginxNamespace
  ingressServiceName="${nginxReleaseName}-controller"
else
  ingressNamespace=$webAppRoutingNamespace
  ingressServiceName=$webAppRoutingServiceName
fi

# Retrieve the public IP address of the NGINX ingress controller
echo "Retrieving the external IP address of the [$ingressClassName] NGINX ingress controller..."
publicIpAddress=$(kubectl get service -o json -n $ingressNamespace |
  jq -r '.items[] |
    select(.spec.type == "LoadBalancer" and .metadata.name == "'$ingressServiceName'") |
    .status.loadBalancer.ingress[0].ip')

if [ -n "$publicIpAddress" ]; then
  echo "[$publicIpAddress] external IP address of the [$ingressClassName] NGINX ingress controller successfully retrieved"
else
  echo "Failed to retrieve the external IP address of the [$ingressClassName] NGINX ingress controller"
  exit
fi

for subdomain in ${subdomains[@]}; do
  # Check if an A record for the subdomain exists in the DNS Zone
  echo "Retrieving the A record for the [$subdomain] subdomain from the [$dnsZoneName] DNS zone..."
  ipv4Address=$(az network dns record-set a list \
    --zone-name $dnsZoneName \
    --resource-group $dnsZoneResourceGroupName \
    --query "[?name=='$subdomain'].ARecords[].ipv4Address" \
    --output tsv \
    --only-show-errors)

  if [[ -n $ipv4Address ]]; then
    echo "An A record already exists in [$dnsZoneName] DNS zone for the [$subdomain] subdomain with [$ipv4Address] IP address"

    if [[ $ipv4Address == $publicIpAddress ]]; then
      echo "The [$ipv4Address] ip address of the existing A record is equal to the ip address of the ingress"
      echo "No additional step is required"
      continue
    else
      echo "The [$ipv4Address] ip address of the existing A record is different than the ip address of the ingress"
    fi

    # Retrieve the name of the record set relative to the zone
    echo "Retrieving the name of the record set relative to the [$dnsZoneName] zone..."

    recordSetName=$(az network dns record-set a list \
      --zone-name $dnsZoneName \
      --resource-group $dnsZoneResourceGroupName \
      --query "[?name=='$subdomain'].name" \
      --output tsv \
      --only-show-errors 2>/dev/null)

    if [[ -n $recordSetName ]]; then
      echo "[$recordSetName] record set name successfully retrieved"
    else
      echo "Failed to retrieve the name of the record set relative to the [$dnsZoneName] zone"
      exit
    fi

    # Remove the A record
    echo "Removing the A record from the record set relative to the [$dnsZoneName] zone..."

    az network dns record-set a remove-record \
      --ipv4-address $ipv4Address \
      --record-set-name $recordSetName \
      --zone-name $dnsZoneName \
      --resource-group $dnsZoneResourceGroupName \
      --only-show-errors 1>/dev/null

    if [[ $? == 0 ]]; then
      echo "[$ipv4Address] ip address successfully removed from the [$recordSetName] record set"
    else
      echo "Failed to remove the [$ipv4Address] ip address from the [$recordSetName] record set"
      exit
    fi
  fi

  # Create the A record
  echo "Creating an A record in [$dnsZoneName] DNS zone for the [$subdomain] subdomain with [$publicIpAddress] IP address..."
  az network dns record-set a add-record \
    --zone-name $dnsZoneName \
    --resource-group $dnsZoneResourceGroupName \
    --record-set-name $subdomain \
    --ipv4-address $publicIpAddress \
    --only-show-errors 1>/dev/null

  if [[ $? == 0 ]]; then
    echo "A record for the [$subdomain] subdomain with [$publicIpAddress] IP address successfully created in [$dnsZoneName] DNS zone"
  else
    echo "Failed to create an A record for the $subdomain subdomain with [$publicIpAddress] IP address in [$dnsZoneName] DNS zone"
  fi
done

YAML manifests

Below you can read the YAML manifests used to deploy the chat chatbot to AKS. For brevity, I will cover only the installation of this application, but you can find all the YAML manifests in the companion GitHub repository. The chat-configmap.yml manifest defines values for the environment variables passed to the application container. The configmap does not define any value for the OpenAI key, because the container authenticates against Azure OpenAI with a Microsoft Entra ID security token acquired via its workload identity.

apiVersion: v1
kind: ConfigMap
metadata:
  name: chat-configmap
data:
  TEMPERATURE: "0.9"
  AZURE_OPENAI_TYPE: azure_ad
  AZURE_OPENAI_BASE: https://contoso.openai.azure.com/
  AZURE_OPENAI_KEY: ""
  AZURE_OPENAI_VERSION: 2023-12-01-preview
  AZURE_OPENAI_DEPLOYMENT: gpt-35-turbo
  AZURE_OPENAI_MODEL: gpt-35-turbo
  AZURE_OPENAI_SYSTEM_MESSAGE: "You are a helpful assistant."
  MAX_RETRIES: "5"
  TIMEOUT: "30"
  DEBUG: "False"

These are the parameters defined by the configmap:

- TEMPERATURE: the temperature used by the OpenAI API to generate the response.
- AZURE_OPENAI_TYPE: specify azure if you want the application to use an API key to authenticate against OpenAI. In this case, make sure to provide the key in the AZURE_OPENAI_KEY environment variable. If you want to authenticate using a Microsoft Entra ID security token, specify azure_ad as the value. In this case, you don't need to provide any value in the AZURE_OPENAI_KEY environment variable. See the sketch after this list for how an application can branch on this setting.
- AZURE_OPENAI_BASE: the URL of your Azure OpenAI resource. If you use an API key to authenticate against OpenAI, you can specify the regional endpoint of your Azure OpenAI Service (e.g., https://eastus.api.cognitive.microsoft.com/). If you instead plan to use Microsoft Entra ID security tokens for authentication, you need to deploy your Azure OpenAI Service with a subdomain and specify the resource-specific endpoint URL (e.g., https://myopenai.openai.azure.com/).
- AZURE_OPENAI_KEY: the key of your Azure OpenAI resource. If you set AZURE_OPENAI_TYPE to azure_ad, you can leave this parameter empty.
- AZURE_OPENAI_VERSION: a string representing the version of the OpenAI API.
- AZURE_OPENAI_DEPLOYMENT: the name of the ChatGPT deployment used by your Azure OpenAI resource, for example gpt-35-turbo.
- AZURE_OPENAI_MODEL: the name of the ChatGPT model used by your Azure OpenAI resource, for example gpt-35-turbo.
- AZURE_OPENAI_SYSTEM_MESSAGE: the content of the system message used for OpenAI API calls. You can use it to describe the assistant's personality.
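To make the effect of the AZURE_OPENAI_TYPE setting concrete, here is a minimal, illustrative Python sketch of how an application can branch between key-based and Microsoft Entra ID authentication. It is not the sample's actual code: it assumes the openai and azure-identity packages and reuses the environment variable names defined in the configmap above.

# Minimal sketch: pick the authentication mode from the AZURE_OPENAI_TYPE environment variable.
import os
from openai import AzureOpenAI
from azure.identity import DefaultAzureCredential, get_bearer_token_provider

api_type = os.getenv("AZURE_OPENAI_TYPE", "azure")          # "azure" or "azure_ad"
endpoint = os.environ["AZURE_OPENAI_BASE"]                  # e.g. https://myopenai.openai.azure.com/
api_version = os.getenv("AZURE_OPENAI_VERSION", "2023-12-01-preview")

if api_type == "azure":
    # Key-based authentication: the key must be provided in AZURE_OPENAI_KEY.
    client = AzureOpenAI(
        azure_endpoint=endpoint,
        api_key=os.environ["AZURE_OPENAI_KEY"],
        api_version=api_version,
    )
else:
    # Microsoft Entra ID authentication: no key is needed; the workload identity
    # (or any credential supported by DefaultAzureCredential) provides the token.
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    client = AzureOpenAI(
        azure_endpoint=endpoint,
        azure_ad_token_provider=token_provider,
        api_version=api_version,
    )

With this pattern, the same container image can run with either authentication mode simply by changing the configmap values, which is why AZURE_OPENAI_KEY can be left empty when AZURE_OPENAI_TYPE is azure_ad.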
The chat-deployment.yml manifest is used to create a Kubernetes deployment that defines the application pods to create. The azure.workload.identity/use label is required in the pod template spec: only pods with this label are mutated by the azure-workload-identity mutating admission webhook to inject the Azure-specific environment variables and the projected service account token volume.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: chat
  labels:
    app: chat
spec:
  replicas: 3
  selector:
    matchLabels:
      app: chat
      azure.workload.identity/use: "true"
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  minReadySeconds: 5
  template:
    metadata:
      labels:
        app: chat
        azure.workload.identity/use: "true"
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: chainlit-sa
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: chat
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: chat
      nodeSelector:
        "kubernetes.io/os": linux
      containers:
      - name: chat
        image: contoso.azurecr.io/chainlitchat:v1
        imagePullPolicy: Always
        resources:
          requests:
            memory: "128Mi"
            cpu: "250m"
          limits:
            memory: "256Mi"
            cpu: "500m"
        ports:
        - containerPort: 8000
        livenessProbe:
          httpGet:
            path: /
            port: 8000
          failureThreshold: 1
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            path: /
            port: 8000
          failureThreshold: 1
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
        startupProbe:
          httpGet:
            path: /
            port: 8000
          failureThreshold: 1
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
        env:
        - name: TEMPERATURE
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: TEMPERATURE
        - name: AZURE_OPENAI_TYPE
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: AZURE_OPENAI_TYPE
        - name: AZURE_OPENAI_BASE
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: AZURE_OPENAI_BASE
        - name: AZURE_OPENAI_KEY
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: AZURE_OPENAI_KEY
        - name: AZURE_OPENAI_VERSION
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: AZURE_OPENAI_VERSION
        - name: AZURE_OPENAI_DEPLOYMENT
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: AZURE_OPENAI_DEPLOYMENT
        - name: AZURE_OPENAI_MODEL
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: AZURE_OPENAI_MODEL
        - name: AZURE_OPENAI_SYSTEM_MESSAGE
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: AZURE_OPENAI_SYSTEM_MESSAGE
        - name: MAX_RETRIES
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: MAX_RETRIES
        - name: TIMEOUT
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: TIMEOUT
        - name: DEBUG
          valueFrom:
            configMapKeyRef:
              name: chat-configmap
              key: DEBUG

The application is exposed using a ClusterIP Kubernetes service.

apiVersion: v1
kind: Service
metadata:
  name: chat
  labels:
    app: chat
spec:
  type: ClusterIP
  ports:
  - protocol: TCP
    port: 8000
  selector:
    app: chat

The ingress.yml manifest defines a Kubernetes ingress object used to expose the service via the NGINX Ingress Controller.
This project deploys a managed NGINX Ingress Controller using the application routing add-on and an unmanaged instance of the NGINX Ingress Controller using the Helm Terraform provider and the related chart. The Terraform module creates two clusterissuer objects, one for the managed and one for the unmanaged version of the NGINX Ingress Controller. You can run the following command to see the two ingress classes:

kubectl get ingressclass

Executing the command produces a result like the following:

NAME                                 CONTROLLER                                 PARAMETERS   AGE
nginx                                k8s.io/ingress-nginx                       <none>       4d
webapprouting.kubernetes.azure.com   webapprouting.kubernetes.azure.com/nginx   <none>       4d22h

Run the following command to retrieve the cluster issuers used by cert-manager:

kubectl get clusterissuer

The above command should return a result like the following:

NAME                        READY   AGE
letsencrypt-nginx           True    4d2h
letsencrypt-webapprouting   True    4d2h

The chat-ingress manifest contains the ingress object used to expose the chat application. This version of the ingress makes use of the unmanaged instance of the NGINX Ingress Controller.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: chat-ingress
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-nginx
    cert-manager.io/acme-challenge-type: http01
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: "3600"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/cors-allow-origin: "*"
    nginx.ingress.kubernetes.io/cors-allow-credentials: "false"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - chat.babosbird.com
    secretName: chat-tls-secret
  rules:
  - host: chat.babosbird.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: chat
            port:
              number: 8000

This version of the ingress makes use of the managed instance of the NGINX Ingress Controller installed by the application routing add-on.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: chat-ingress-webapprouting
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-webapprouting
    cert-manager.io/acme-challenge-type: http01
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: "3600"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/cors-allow-origin: "*"
    nginx.ingress.kubernetes.io/cors-allow-credentials: "false"
spec:
  ingressClassName: webapprouting.kubernetes.azure.com
  tls:
  - hosts:
    - chat.babosbird.com
    secretName: chat-tls-secret-webapprouting
  rules:
  - host: chat.babosbird.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: chat
            port:
              number: 8000

The ingress object defines the following annotations:

- cert-manager.io/cluster-issuer: specifies the name of a cert-manager.io ClusterIssuer to acquire the certificate required for this Ingress. It does not matter which namespace your Ingress resides in, as ClusterIssuers are non-namespaced resources.
  In this sample, cert-manager is instructed to use the letsencrypt-nginx ClusterIssuer that you can create using the 06-create-cluster-issuer.sh script.
- cert-manager.io/acme-challenge-type: specifies the challenge type.
- nginx.ingress.kubernetes.io/affinity: set this annotation to cookie, as Chainlit uses WebSockets and requires cookie-based session affinity.
- nginx.ingress.kubernetes.io/proxy-connect-timeout: specifies the connection timeout in seconds.
- nginx.ingress.kubernetes.io/proxy-send-timeout: specifies the send timeout in seconds.
- nginx.ingress.kubernetes.io/proxy-read-timeout: specifies the read timeout in seconds.
- nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: specifies the next upstream timeout in seconds.
- nginx.ingress.kubernetes.io/enable-cors: to enable Cross-Origin Resource Sharing (CORS) in an Ingress rule, set this annotation to true.
- nginx.ingress.kubernetes.io/cors-allow-origin: controls the accepted origin for CORS.
- nginx.ingress.kubernetes.io/cors-allow-credentials: controls whether credentials can be passed during CORS operations.

Clean up resources

When you no longer need the resources you created, you can delete the resource group using the following Azure CLI command. This removes all the Azure resources.

az group delete --name <resource-group-name>

Alternatively, you can use the following PowerShell cmdlet to delete the resource group and all the Azure resources.

Remove-AzResourceGroup -Name <resource-group-name>
Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terraform

This article and the companion sample show how to create two Azure Container Apps that use OpenAI, LangChain, ChromaDB, and Chainlit using Terraform.

This article shows how to quickly build chat applications using Python and leveraging powerful technologies such as OpenAI ChatGPT models, Embedding models, the LangChain framework, the ChromaDB vector database, and Chainlit, an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. These applications are hosted on Azure Container Apps, a fully managed environment that enables you to run microservices and containerized applications on a serverless platform.

- Simple Chat: this simple chat application utilizes OpenAI's language models to generate real-time completion responses.
- Documents QA Chat: this chat application goes beyond simple conversations. Users can upload up to 10 .pdf and .docx documents, which are then processed to create vector embeddings. These embeddings are stored in ChromaDB for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response.

Both applications use a user-defined managed identity to authenticate and authorize against Azure OpenAI Service (AOAI) and Azure Container Registry (ACR), and use Azure Private Endpoints to connect privately and securely to these services. The chat UIs are built using Chainlit, an open-source Python package designed explicitly for creating AI applications. Chainlit seamlessly integrates with LangChain, LlamaIndex, and LangFlow, making it a powerful tool for easily developing ChatGPT-like applications.

By following our example, you can quickly create sophisticated chat applications that utilize cutting-edge technologies, empowering users with intelligent conversational capabilities.

You can find the code and Visio diagrams in the companion GitHub repository. Also, check the following articles:

- Deploy and run an Azure OpenAI ChatGPT application on AKS via Bicep
- Deploy and run an Azure OpenAI ChatGPT application on AKS via Terraform

Prerequisites

- An active Azure subscription. If you don't have one, create a free Azure account before you begin.
- Visual Studio Code installed on one of the supported platforms, along with the HashiCorp Terraform extension.
- Azure CLI version 2.49.0 or later installed. To install or upgrade, see Install Azure CLI.
- The aks-preview Azure CLI extension, version 0.5.140 or later, installed.
- Terraform v1.5.2 or later.

Architecture

The following diagram shows the architecture and network topology of the sample:

[architecture diagram]

This sample provides two sets of Terraform modules to deploy the infrastructure and the chat applications.

Infrastructure Terraform Modules

You can use the Terraform modules in the terraform/infra folder to deploy the infrastructure used by the sample, including the Azure Container Apps Environment, Azure OpenAI Service (AOAI), and Azure Container Registry (ACR), but not the Azure Container Apps (ACA). The Terraform modules in the terraform/infra folder deploy the following resources:

- azurerm_virtual_network: an Azure Virtual Network with two subnets:
  - ContainerApps: this subnet hosts the Azure Container Apps Environment.
  - PrivateEndpoints: this subnet contains the Azure Private Endpoints to the Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) resources.
- azurerm_container_app_environment: the Azure Container Apps Environment hosting the Azure Container Apps.
- azurerm_cognitive_account: an Azure OpenAI Service (AOAI) with a GPT-3.5 model used by the chatbot applications. Azure OpenAI Service gives customers advanced language AI with OpenAI GPT-4, GPT-3, Codex, and DALL-E models with Azure's security and enterprise promise. Azure OpenAI co-develops the APIs with OpenAI, ensuring compatibility and a smooth transition from one to the other. The Terraform modules create the following models:
  - GPT-35: a gpt-35-turbo-16k model used to generate human-like and engaging conversational responses.
  - Embeddings model: the text-embedding-ada-002 model, used to transform input documents into meaningful and compact numerical representations called embeddings. Embeddings capture the semantic or contextual information of the input data in a lower-dimensional space, making it easier for machine learning algorithms to process and analyze the data effectively. Embeddings can be stored in a vector database, such as ChromaDB or Facebook AI Similarity Search, explicitly designed for efficient storage, indexing, and retrieval of vector embeddings.
- azurerm_user_assigned_identity: a user-defined managed identity used by the chatbot applications to acquire a security token to call the Chat Completion API of the ChatGPT model provided by the Azure OpenAI Service and to call the Embedding model.
- azurerm_container_registry: an Azure Container Registry (ACR) to build, store, and manage container images and artifacts in a private registry for all container deployments. In this sample, the registry stores the container images of the two chat applications.
- azurerm_private_endpoint: an Azure Private Endpoint is created for each of the following resources:
  - Azure OpenAI Service (AOAI)
  - Azure Container Registry (ACR)
- azurerm_private_dns_zone: an Azure Private DNS Zone is created for each of the following resources:
  - Azure OpenAI Service (AOAI)
  - Azure Container Registry (ACR)
- azurerm_log_analytics_workspace: a centralized Azure Log Analytics workspace used to collect the diagnostics logs and metrics from all the Azure resources:
  - Azure OpenAI Service (AOAI)
  - Azure Container Registry (ACR)
  - Azure Container Apps (ACA)

Application Terraform Modules

You can use the Terraform modules in the terraform/apps folder to deploy the Azure Container Apps (ACA) using the Docker container images stored in the Azure Container Registry that you deployed in the previous step.

- azurerm_container_app: this sample deploys the following applications:
  - chatapp: this simple chat application utilizes OpenAI's language models to generate real-time completion responses.
  - docapp: this chat application goes beyond simple conversations. Users can upload up to 10 .pdf and .docx documents, which are then processed to create vector embeddings. These embeddings are stored in ChromaDB for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the files that were used as a source for the response.

Azure Container Apps

Azure Container Apps (ACA) is a serverless compute service provided by Microsoft Azure that allows developers to easily deploy and manage containerized applications without the need to manage the underlying infrastructure. It provides a simplified and scalable solution for running applications in containers, leveraging the power and flexibility of the Azure ecosystem.

With Azure Container Apps, developers can package their applications into containers using popular containerization technologies such as Docker. These containers encapsulate the application and its dependencies, ensuring consistent execution across different environments.

Powered by Kubernetes and open-source technologies like Dapr, KEDA, and Envoy, the service abstracts away the complexities of managing the infrastructure, including provisioning, scaling, and monitoring, allowing developers to focus solely on building and deploying their applications. Azure Container Apps handles automatic scaling and load balancing, and natively integrates with other Azure services, such as Azure Monitor and Azure Container Registry (ACR), to provide a comprehensive and secure application deployment experience.

Azure Container Apps offers benefits such as rapid deployment, easy scalability, cost-efficiency, and seamless integration with other Azure services, making it an attractive choice for modern application development and deployment scenarios.

Azure OpenAI Service

The Azure OpenAI Service is a platform offered by Microsoft Azure that provides cognitive services powered by OpenAI models. One of the models available through this service is the ChatGPT model, which is designed for interactive conversational tasks. It allows developers to integrate natural language understanding and generation capabilities into their applications.
Azure OpenAI Service provides REST API access to OpenAI's powerful language models, including the GPT-3, Codex, and Embeddings model series. In addition, the new GPT-4 and ChatGPT model series have now reached general availability. These models can be easily adapted to your specific task, including but not limited to content generation, summarization, semantic search, and natural language-to-code translation. Users can access the service through REST APIs, the Python SDK, or the web-based interface in the Azure OpenAI Studio.

You can use an Embeddings model to transform raw data or inputs into meaningful and compact numerical representations called embeddings. Embeddings capture the semantic or contextual information of the input data in a lower-dimensional space, making it easier for machine learning algorithms to process and analyze the data effectively. Embeddings can be stored in a vector database, such as ChromaDB or Facebook AI Similarity Search (FAISS), explicitly designed for efficient storage, indexing, and retrieval of vector embeddings.

The Chat Completion API, which is part of the Azure OpenAI Service, provides a dedicated interface for interacting with the ChatGPT and GPT-4 models. This API is currently in preview and is the preferred method for accessing these models. The GPT-4 models can only be accessed through this API.

GPT-3, GPT-3.5, and GPT-4 models from OpenAI are prompt-based. With prompt-based models, the user interacts with the model by entering a text prompt, to which the model responds with a text completion. This completion is the model's continuation of the input text. While these models are very powerful, their behavior is also very sensitive to the prompt. This makes prompt construction a critical skill to develop. For more information, see Introduction to prompt engineering.

Prompt construction can be complex. In practice, the prompt acts to configure the model weights to complete the desired task, but it's more of an art than a science, often requiring experience and intuition to craft a successful prompt. The goal of this article is to help get you started with this learning process. It attempts to capture general concepts and patterns that apply to all GPT models. However, it's essential to understand that each model behaves differently, so the learnings may not apply equally to all models.

Prompt engineering refers to the process of creating instructions called prompts for Large Language Models (LLMs), such as OpenAI's ChatGPT. With the immense potential of LLMs to solve a wide range of tasks, leveraging prompt engineering can empower us to save significant time and facilitate the development of impressive applications. It holds the key to unleashing the full capabilities of these huge models, transforming how we interact with and benefit from them. For more information, see Prompt engineering techniques.
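As a brief, hedged illustration of the prompt-based interaction and the Embeddings API described above, the following minimal Python sketch calls an Azure OpenAI resource with the openai package and key-based authentication. The endpoint, key, and deployment names (gpt-35-turbo and text-embedding-ada-002) are placeholders borrowed from this article's examples, not values you must use.

# Minimal sketch: call the Chat Completion and Embeddings APIs of an Azure OpenAI resource.
import os
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_BASE"],
    api_key=os.environ["AZURE_OPENAI_KEY"],
    api_version="2023-12-01-preview",
)

# Prompt-based interaction: the system message shapes the assistant's behavior,
# the user message is the prompt, and the model returns a completion.
completion = client.chat.completions.create(
    model="gpt-35-turbo",  # the name of your ChatGPT deployment
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain what a vector embedding is in one sentence."},
    ],
    temperature=0.9,
)
print(completion.choices[0].message.content)

# The Embeddings API turns text into a numerical vector that can be stored in a vector database.
embedding = client.embeddings.create(
    model="text-embedding-ada-002",  # the name of your embeddings deployment
    input="Azure Container Apps is a serverless container service.",
)
print(len(embedding.data[0].embedding))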
Vector Databases

A vector database is a specialized database that goes beyond traditional storage by organizing information to simplify the search for similar items. Instead of merely storing words or numbers, it leverages vector embeddings, unique numerical representations of data. These embeddings capture meaning, context, and relationships. For instance, words are represented as vectors, and similar words have similar vector values.

The applications of vector databases are numerous and powerful. In language processing, they facilitate the discovery of related documents or sentences. By comparing the vector embeddings of different texts, finding similar or related information becomes faster and more efficient. This capability benefits search engines and recommendation systems, which can suggest relevant articles or products based on user interests.

In the realm of image analysis, vector databases excel at finding visually similar images. By representing images as vectors, a simple comparison of vector values can identify visually similar images. This capability is precious for tasks like reverse image search or content-based image retrieval.

Additionally, vector databases find applications in fraud detection, anomaly detection, and clustering. By comparing vector embeddings of data points, unusual patterns can be detected, and similar items can be grouped together, aiding in effective data analysis and decision-making.

This is a list of Azure services that are suitable for use as a vector database in a retrieval-augmented generation (RAG) solution:

- Azure Cosmos DB for MongoDB vCore: vCore-based Azure Cosmos DB for MongoDB provides developers with a fully managed MongoDB-compatible database service for building modern applications with a familiar architecture. Developers can enjoy the benefits of native Azure integrations, low total cost of ownership (TCO), and the familiar vCore architecture when migrating existing applications or building new ones. Azure Cosmos DB for MongoDB features built-in vector database capabilities, enabling your data and vectors to be stored together for efficient and accurate vector searches.
- Azure Cosmos DB for NoSQL: Azure Cosmos DB for NoSQL is a globally distributed database service designed for scalable and high-performance applications. It offers an industry-leading 99.999% Service Level Agreement (SLA), ensuring high availability for your mission-critical applications. With sub-10ms point reads and instant autoscale, it provides lightning-fast data access and seamless scalability. Its flexible, schemaless data model allows for agile and adaptable application development. Moreover, Azure Cosmos DB's built-in vector index using DiskANN enables fast, accurate, and cost-effective vector search at any scale, enhancing the efficiency and effectiveness of your data-driven applications.
- Azure Cosmos DB for PostgreSQL: you can use the natively integrated vector database in Azure Cosmos DB for PostgreSQL, which offers an efficient way to store, index, and search high-dimensional vector data directly alongside other application data. This approach removes the necessity of migrating your data to costlier alternative vector databases and provides a seamless integration of your AI-driven applications.
- Azure Cache for Redis: Azure Cache for Redis can be used as a vector database by combining it with models like Azure OpenAI for retrieval-augmented generative AI and analysis scenarios.

Here is a list of the most popular vector databases:

- ChromaDB is a powerful database solution that stores and retrieves vector embeddings efficiently. It is commonly used in AI applications, including chatbots and document analysis systems. By storing embeddings in ChromaDB, users can easily search and retrieve similar vectors, enabling faster and more accurate matching or recommendation processes. ChromaDB offers excellent scalability and high performance, and supports various indexing techniques to optimize search operations.
It is a versatile tool that enhances the functionality and efficiency of AI applications that rely on vector embeddings. \n Facebook AI Similarity Search (FAISS) is another widely used vector database. Facebook AI Research develops it and offers highly optimized algorithms for similarity search and clustering of vector embeddings. FAISS is known for its speed and scalability, making it suitable for large-scale applications. It offers different indexing methods like flat, IVF (Inverted File System), and HNSW (Hierarchical Navigable Small World) to organize and search vector data efficiently. \n SingleStore: SingleStore aims to deliver the world’s fastest distributed SQL database for data-intensive applications: SingleStoreDB, which combines transactional + analytical workloads in a single platform. \n Astra DB: DataStax Astra DB is a cloud-native, multi-cloud, fully managed database-as-a-service based on Apache Cassandra, which aims to accelerate application development and reduce deployment time for applications from weeks to minutes. \n Milvus: Milvus is an open source vector database built to power embedding similarity search and AI applications. Milvus makes unstructured data search more accessible and provides a consistent user experience regardless of the deployment environment. Milvus 2.0 is a cloud-native vector database with storage and computation separated by design. All components in this refactored version of Milvus are stateless to enhance elasticity and flexibility. \n Qdrant: Qdrant is a vector similarity search engine and database for AI applications. Along with open-source, Qdrant is also available in the cloud. It provides a production-ready service with an API to store, search, and manage points—vectors with an additional payload. Qdrant is tailored to extended filtering support. It makes it useful for all sorts of neural network or semantic-based matching, faceted search, and other applications. \n Pinecone: Pinecone is a fully managed vector database that makes adding vector search to production applications accessible. It combines state-of-the-art vector search libraries, advanced features such as filtering, and distributed infrastructure to provide high performance and reliability at any scale. \n Vespa: Vespa is a platform for applications combining data and AI online. Building such applications on Vespa helps users avoid integration work to get features, and it can scale to support any amount of traffic and data. To deliver that, Vespa provides a broad range of query capabilities, a computation engine with support for modern machine-learned models, hands-off operability, data management, and application development support. It is free and open source to use under the Apache 2.0 license. \n Zilliz: Milvus is an open-source vector database, with over 18,409 stars on GitHub and 3.4 million+ downloads. Milvus supports billion-scale vector search and has over 1,000 enterprise users. Zilliz Cloud provides a fully-managed Milvus service made by the creators of Milvus. This helps to simplify the process of deploying and scaling vector search applications by eliminating the need to create and maintain complex data infrastructure. As a DBaaS, Zilliz simplifies the process of deploying and scaling vector search applications by eliminating the need to create and maintain complex data infrastructure. 
Weaviate: Weaviate is an open-source vector database from the company of the same name in Amsterdam. It is used to store data objects and vector embeddings from ML models and can scale to billions of data objects. Users can index billions of data objects to search through and combine multiple search techniques, such as keyword-based and vector search, to provide rich search experiences.

This sample makes use of the ChromaDB vector database, but you can easily modify the code to use another vector database. You can even use Azure Cache for Redis Enterprise to store the vector embeddings and compute vector similarity with high performance and low latency. For more information, see Vector Similarity Search with Azure Cache for Redis Enterprise.

LangChain

LangChain is a software framework designed to streamline the development of applications using large language models (LLMs). It serves as a language model integration framework, facilitating various applications like document analysis and summarization, chatbots, and code analysis.

LangChain's integrations cover an extensive range of systems, tools, and services, making it a comprehensive solution for language model-based applications. LangChain integrates with the major cloud platforms, such as Microsoft Azure, Amazon AWS, and Google, and with API wrappers for various purposes like news, movie information, and weather, as well as support for Bash, web scraping, and more. It also supports multiple language models, including those from OpenAI, Anthropic, and Hugging Face. Moreover, LangChain offers various functionalities for document handling, code generation, analysis, debugging, and interaction with databases and other data sources.

Chainlit

Chainlit is an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. It simplifies the process of building interactive chats and interfaces, making the development of AI-powered applications faster and more efficient. While Streamlit is a general-purpose UI library, Chainlit is purpose-built for AI applications and seamlessly integrates with other AI technologies such as LangChain, LlamaIndex, and LangFlow.

With Chainlit, developers can easily create intuitive UIs for their AI models, including ChatGPT-like applications. It provides a user-friendly interface for users to interact with AI models, enabling conversational experiences and information retrieval. Chainlit also offers unique features, such as displaying the Chain of Thought, which allows users to explore the reasoning process directly within the UI. This feature enhances transparency and enables users to understand how the AI arrives at its responses or recommendations.

For more information, see the following resources:

Documentation
Examples
API Reference
Cookbook

Deploy the Infrastructure

Before deploying the Terraform modules in the terraform/infra folder, specify a value for the following variables in the terraform.tfvars variable definitions file.

name_prefix = "Blue"
location = "EastUS"

This is the definition of each variable:

name_prefix: specifies a prefix for all the Azure resources.
location: specifies the region (e.g., EastUS) where the Azure resources will be deployed.

NOTE: Make sure to select a region where Azure OpenAI Service (AOAI) supports both GPT-3.5/GPT-4 models like gpt-35-turbo-16k and Embeddings models like text-embedding-ada-002.
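Once the variable definitions file is in place, the infrastructure can be deployed with the usual Terraform workflow. The commands below are a typical sequence, not taken from the sample's scripts; they assume Terraform and the Azure CLI are installed and that you are signed in with az login, and the plan file name is only an example.

terraform init
terraform plan -out main.tfplan
terraform apply main.tfplan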
\n   \n OpenAI Module \n The following table contains the code from the  terraform/infra/modules/openai/main.tf  Terraform module used to deploy the Azure OpenAI Service. \n   \n   \n resource \"azurerm_cognitive_account\" \"openai\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n kind = \"OpenAI\"\n custom_subdomain_name = var.custom_subdomain_name\n sku_name = var.sku_name\n public_network_access_enabled = var.public_network_access_enabled\n tags = var.tags\n\n identity {\n type = \"SystemAssigned\"\n }\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_cognitive_deployment\" \"deployment\" {\n for_each = {for deployment in var.deployments: deployment.name => deployment}\n\n name = each.key\n cognitive_account_id = azurerm_cognitive_account.openai.id\n\n model {\n format = \"OpenAI\"\n name = each.value.model.name\n version = each.value.model.version\n }\n\n scale {\n type = \"Standard\"\n }\n}\n\nresource \"azurerm_monitor_diagnostic_setting\" \"settings\" {\n name = \"DiagnosticsSettings\"\n target_resource_id = azurerm_cognitive_account.openai.id\n log_analytics_workspace_id = var.log_analytics_workspace_id\n\n enabled_log {\n category = \"Audit\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"RequestResponse\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"Trace\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n metric {\n category = \"AllMetrics\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n} \n   \n   \n Azure Cognitive Services uses custom subdomain names for each resource created through the Azure portal, Azure Cloud Shell, Azure CLI, Bicep, Azure Resource Manager (ARM), or Terraform. Unlike regional endpoints, which were common for all customers in a specific Azure region, custom subdomain names are unique to the resource. Custom subdomain names are required to enable authentication features like Azure Active Directory (Azure AD). We need to specify a custom subdomain for our Azure OpenAI Service, as our chatbot applications will use an Azure AD security token to access it. By default, the  terraform/infra/modules/openai/main.tf  module sets the value of the  custom_subdomain_name  parameter to the lowercase name of the Azure OpenAI resource. For more information on custom subdomains, see Custom subdomain names for Cognitive Services. \n This Terraform module allows you to pass an array containing the definition of one or more model deployments in the  deployments  variable. For more information on model deployments, see Create a resource and deploy a model using Azure OpenAI. 
The openai_deployments variable in the terraform/infra/variables.tf file defines the structure and the default models deployed by the sample:

variable "openai_deployments" {
  description = "(Optional) Specifies the deployments of the Azure OpenAI Service"
  type = list(object({
    name = string
    model = object({
      name = string
      version = string
    })
    rai_policy_name = string
  }))
  default = [
    {
      name = "gpt-35-turbo-16k"
      model = {
        name = "gpt-35-turbo-16k"
        version = "0613"
      }
      rai_policy_name = ""
    },
    {
      name = "text-embedding-ada-002"
      model = {
        name = "text-embedding-ada-002"
        version = "2"
      }
      rai_policy_name = ""
    }
  ]
}

Alternatively, you can use the Terraform module for deploying Azure OpenAI Service.

Private Endpoint Module

The terraform/infra/main.tf module creates Azure Private Endpoints and Azure Private DNS Zones for each of the following resources:

Azure OpenAI Service (AOAI)
Azure Container Registry (ACR)

In particular, it creates an Azure Private Endpoint and an Azure Private DNS Zone for the Azure OpenAI Service, as shown in the following code snippet:

module "openai_private_dns_zone" {
  source = "./modules/private_dns_zone"
  name = "privatelink.openai.azure.com"
  resource_group_name = azurerm_resource_group.rg.name
  tags = var.tags
  virtual_networks_to_link = {
    (module.virtual_network.name) = {
      subscription_id = data.azurerm_client_config.current.subscription_id
      resource_group_name = azurerm_resource_group.rg.name
    }
  }
}

module "openai_private_endpoint" {
  source = "./modules/private_endpoint"
  name = "${module.openai.name}PrivateEndpoint"
  location = var.location
  resource_group_name = azurerm_resource_group.rg.name
  subnet_id = module.virtual_network.subnet_ids[var.vm_subnet_name]
  tags = var.tags
  private_connection_resource_id = module.openai.id
  is_manual_connection = false
  subresource_name = "account"
  private_dns_zone_group_name = "AcrPrivateDnsZoneGroup"
  private_dns_zone_group_ids = [module.openai_private_dns_zone.id]
}

Below you can read the code of the terraform/infra/modules/private_endpoint/main.tf module, which is used to create Azure Private Endpoints:

resource "azurerm_private_endpoint" "private_endpoint" {
  name = var.name
  location = var.location
  resource_group_name = var.resource_group_name
  subnet_id = var.subnet_id
  tags = var.tags

  private_service_connection {
    name = "${var.name}Connection"
    private_connection_resource_id = var.private_connection_resource_id
    is_manual_connection = var.is_manual_connection
    subresource_names = try([var.subresource_name], null)
    request_message = try(var.request_message, null)
  }

  private_dns_zone_group {
    name = var.private_dns_zone_group_name
    private_dns_zone_ids = var.private_dns_zone_group_ids
  }

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

Private DNS Zone Module

In the following box, you can read the code of the terraform/infra/modules/private_dns_zone/main.tf module, which is utilized to create the Azure Private DNS Zones.
resource "azurerm_private_dns_zone" "private_dns_zone" {
  name = var.name
  resource_group_name = var.resource_group_name
  tags = var.tags

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

resource "azurerm_private_dns_zone_virtual_network_link" "link" {
  for_each = var.virtual_networks_to_link

  name = "link_to_${lower(basename(each.key))}"
  resource_group_name = var.resource_group_name
  private_dns_zone_name = azurerm_private_dns_zone.private_dns_zone.name
  virtual_network_id = "/subscriptions/${each.value.subscription_id}/resourceGroups/${each.value.resource_group_name}/providers/Microsoft.Network/virtualNetworks/${each.key}"

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

Workload Managed Identity Module

Below you can read the code of the terraform/infra/modules/managed_identity/main.tf module, which is used to create the Azure Managed Identity used by the Azure Container Apps to pull container images from the Azure Container Registry, and by the chat applications to connect to the Azure OpenAI Service. You can use a system-assigned or user-assigned managed identity from Azure Active Directory (Azure AD) to let Azure Container Apps access any Azure AD-protected resource. For more information, see Managed identities in Azure Container Apps. You can pull container images from private repositories in an Azure Container Registry using system-assigned or user-assigned managed identities for authentication to avoid using administrative credentials. For more information, see Azure Container Apps image pull with managed identity. This user-defined managed identity is assigned the Cognitive Services User role on the Azure OpenAI Service namespace and the AcrPull role on the Azure Container Registry (ACR). By assigning these roles, you grant the user-defined managed identity access to these resources.

resource "azurerm_user_assigned_identity" "workload_user_assigned_identity" {
  name = var.name
  resource_group_name = var.resource_group_name
  location = var.location
  tags = var.tags

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

resource "azurerm_role_assignment" "cognitive_services_user_assignment" {
  scope = var.openai_id
  role_definition_name = "Cognitive Services User"
  principal_id = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id
  skip_service_principal_aad_check = true
}

resource "azurerm_role_assignment" "acr_pull_assignment" {
  scope = var.acr_id
  role_definition_name = "AcrPull"
  principal_id = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id
  skip_service_principal_aad_check = true
}

Deploy the Applications

Before deploying the Terraform modules in the terraform/apps folder, specify a value for the following variables in the terraform.tfvars variable definitions file.
\n   \n   \n resource_group_name = \"BlueRG\"\ncontainer_app_environment_name = \"BlueEnvironment\"\ncontainer_registry_name = \"BlueRegistry\"\nworkload_managed_identity_name = \"BlueWorkloadIdentity\"\ncontainer_apps = [\n {\n name = \"chatapp\"\n revision_mode = \"Single\"\n ingress = {\n allow_insecure_connections = true\n external_enabled = true\n target_port = 8000\n transport = \"http\"\n traffic_weight = {\n label = \"default\"\n latest_revision = true\n revision_suffix = \"default\"\n percentage = 100\n }\n }\n template = {\n containers = [\n {\n name = \"chat\"\n image = \"chat:v1\"\n cpu = 0.5\n memory = \"1Gi\"\n env = [\n {\n name = \"TEMPERATURE\"\n value = 0.9\n },\n {\n name = \"AZURE_OPENAI_BASE\"\n value = \"https://blueopenai.openai.azure.com/\"\n },\n {\n name = \"AZURE_OPENAI_KEY\"\n value = \"\"\n },\n {\n name = \"AZURE_OPENAI_TYPE\"\n value = \"azure_ad\"\n },\n {\n name = \"AZURE_OPENAI_VERSION\"\n value = \"2023-06-01-preview\"\n },\n {\n name = \"AZURE_OPENAI_DEPLOYMENT\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_MODEL\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n value = \"You are a helpful assistant.\"\n },\n {\n name = \"MAX_RETRIES\"\n value = 5\n },\n {\n name = \"BACKOFF_IN_SECONDS\"\n value = \"1\"\n },\n {\n name = \"TOKEN_REFRESH_INTERVAL\"\n value = 2700\n }\n ]\n liveness_probe = {\n failure_count_threshold = 3\n initial_delay = 30\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n readiness_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n success_count_threshold = 3\n timeout = 30\n transport = \"HTTP\"\n }\n startup_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n }\n ]\n min_replicas = 1\n max_replicas = 3\n }\n },\n {\n name = \"docapp\"\n revision_mode = \"Single\"\n ingress = {\n allow_insecure_connections = true\n external_enabled = true\n target_port = 8000\n transport = \"http\"\n traffic_weight = {\n label = \"default\"\n latest_revision = true\n revision_suffix = \"default\"\n percentage = 100\n }\n }\n template = {\n containers = [\n {\n name = \"doc\"\n image = \"doc:v1\"\n cpu = 0.5\n memory = \"1Gi\"\n env = [\n {\n name = \"TEMPERATURE\"\n value = 0.9\n },\n {\n name = \"AZURE_OPENAI_BASE\"\n value = \"https://blueopenai.openai.azure.com/\"\n },\n {\n name = \"AZURE_OPENAI_KEY\"\n value = \"\"\n },\n {\n name = \"AZURE_OPENAI_TYPE\"\n value = \"azure_ad\"\n },\n {\n name = \"AZURE_OPENAI_VERSION\"\n value = \"2023-06-01-preview\"\n },\n {\n name = \"AZURE_OPENAI_DEPLOYMENT\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_MODEL\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_ADA_DEPLOYMENT\"\n value = \"text-embedding-ada-002\"\n },\n {\n name = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n value = \"You are a helpful assistant.\"\n },\n {\n name = \"MAX_RETRIES\"\n value = 5\n },\n {\n name = \"CHAINLIT_MAX_FILES\"\n value = 10\n },\n {\n name = \"TEXT_SPLITTER_CHUNK_SIZE\"\n value = 1000\n },\n {\n name = \"TEXT_SPLITTER_CHUNK_OVERLAP\"\n value = 10\n },\n {\n name = \"EMBEDDINGS_CHUNK_SIZE\"\n value = 16\n },\n {\n name = \"BACKOFF_IN_SECONDS\"\n value = \"1\"\n },\n {\n name = \"CHAINLIT_MAX_SIZE_MB\"\n value = 100\n },\n {\n name = \"TOKEN_REFRESH_INTERVAL\"\n value = 2700\n }\n ]\n liveness_probe = {\n failure_count_threshold = 3\n initial_delay = 30\n interval_seconds = 
60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n readiness_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n success_count_threshold = 3\n timeout = 30\n transport = \"HTTP\"\n }\n startup_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n }\n ]\n min_replicas = 1\n max_replicas = 3\n }\n }] \n   \n   \n This is the definition of each variable: \n \n resource_group_name : specifies the name of the resource group that contains the infrastructure resources: Azure OpenAI Service, Azure Container Registry, Azure Container Apps Environment, Azure Log Analytics, and user-defined managed identity. \n container_app_environment_name : the name of the Azure Container Apps Environment in which to deploy the chat applications. \n container_registry_name : the name of Azure Container Registry used to hold the container images of the chat applications. \n workload_managed_identity_name : the name of the user-defined managed identity used by the chat applications to authenticate with Azure OpenAI Service and Azure Container Registry. \n container_apps : the definition of the two chat applications. The application configuration does not specify the following data because the  container_app  module later defines this information:\n \n image : This field contains the name and tag of the container image but not the login server of the Azure Container Registry. \n identity : The identity of the container app. \n registry : The registry hosting the container image for the application. \n AZURE_CLIENT_ID : The client ID of the user-defined managed identity used by the application to authenticate with Azure OpenAI Service and Azure Container Registry. \n AZURE_OPENAI_TYPE : This environment variable specifies the authentication type with Azure OpenAI Service: if you set the value of the  AZURE_OPENAI_TYPE  environment variable to  azure , you need to specify the OpenAI key as a value for the  AZURE_OPENAI_KEY  environment variable. Instead, if you set the value to  azure_ad  in the application code, assign an Azure AD security token to the  openai_api_key  property. For more information, see How to switch between OpenAI and Azure OpenAI endpoints with Python. \n \n \n \n   \n Container App Module \n The  terraform/apps/modules/container_app/main.tf  module is utilized to create the Azure Container Apps. The module defines and uses the following data source for the Azure Container Registry, Azure Container Apps Environment, and user-defined managed identity created when deploying the infrastructure. These data sources are used to access the properties of these Azure resources. 
\n   \n   \n data \"azurerm_container_app_environment\" \"container_app_environment\" {\n name = var.container_app_environment_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n name = var.container_registry_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.workload_managed_identity_name\n resource_group_name = var.resource_group_name\n} \n   \n   \n The module creates and utilizes the following local variables: \n   \n   \n locals {\n identity = {\n type = \"UserAssigned\"\n identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n }\n identity_env = {\n name = \"AZURE_CLIENT_ID\"\n secret_name = null\n value = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n }\n registry = {\n server = data.azurerm_container_registry.container_registry.login_server\n identity = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n }\n} \n   \n   \n This is the explanation of each local variable: \n \n identity : uses the resource ID of the user-defined managed identity to define the  identity  block for each container app deployed by the module. \n identity_env : uses the client ID of the user-defined managed identity to define the value of the  AZURE_CLIENT_ID  environment variable that is appended to the list of environment variables of each container app deployed by the module. \n registry : uses the login server of the Azure Container Registry to define the  registry  block for each container app deployed by the module. \n \n Here is the complete Terraform code of the module: \n   \n   \n data \"azurerm_container_app_environment\" \"container_app_environment\" {\n name = var.container_app_environment_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n name = var.container_registry_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.workload_managed_identity_name\n resource_group_name = var.resource_group_name\n}\n\nlocals {\n identity = {\n type = \"UserAssigned\"\n identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n }\n identity_env = {\n name = \"AZURE_CLIENT_ID\"\n secret_name = null\n value = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n }\n registry = {\n server = data.azurerm_container_registry.container_registry.login_server\n identity = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n }\n}\n\nresource \"azurerm_container_app\" \"container_app\" {\n for_each = {for app in var.container_apps: app.name => app}\n\n container_app_environment_id = data.azurerm_container_app_environment.container_app_environment.id\n name = each.key\n resource_group_name = var.resource_group_name\n revision_mode = each.value.revision_mode\n tags = each.value.tags\n\n template {\n max_replicas = each.value.template.max_replicas\n min_replicas = each.value.template.min_replicas\n revision_suffix = each.value.template.revision_suffix\n\n dynamic \"container\" {\n for_each = each.value.template.containers\n\n content {\n cpu = container.value.cpu\n image = \"${data.azurerm_container_registry.container_registry.login_server}/${container.value.image}\"\n memory = container.value.memory\n name = 
container.value.name\n args = container.value.args\n command = container.value.command\n\n dynamic \"env\" {\n for_each = container.value.env == null ? [local.identity_env] : concat(container.value.env, [local.identity_env])\n\n content {\n name = env.value.name\n secret_name = env.value.secret_name\n value = env.value.value\n }\n }\n\n dynamic \"liveness_probe\" {\n for_each = container.value.liveness_probe == null ? [] : [container.value.liveness_probe]\n\n content {\n port = liveness_probe.value.port\n transport = liveness_probe.value.transport\n failure_count_threshold = liveness_probe.value.failure_count_threshold\n host = liveness_probe.value.host\n initial_delay = liveness_probe.value.initial_delay\n interval_seconds = liveness_probe.value.interval_seconds\n path = liveness_probe.value.path\n timeout = liveness_probe.value.timeout\n\n dynamic \"header\" {\n for_each = liveness_probe.value.header == null ? [] : [liveness_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.value\n }\n }\n }\n }\n\n dynamic \"readiness_probe\" {\n for_each = container.value.readiness_probe == null ? [] : [container.value.readiness_probe]\n\n content {\n port = readiness_probe.value.port\n transport = readiness_probe.value.transport\n failure_count_threshold = readiness_probe.value.failure_count_threshold\n host = readiness_probe.value.host\n interval_seconds = readiness_probe.value.interval_seconds\n path = readiness_probe.value.path\n success_count_threshold = readiness_probe.value.success_count_threshold\n timeout = readiness_probe.value.timeout\n\n dynamic \"header\" {\n for_each = readiness_probe.value.header == null ? [] : [readiness_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.value\n }\n }\n }\n }\n\n dynamic \"startup_probe\" {\n for_each = container.value.startup_probe == null ? [] : [container.value.startup_probe]\n\n content {\n port = startup_probe.value.port\n transport = startup_probe.value.transport\n failure_count_threshold = startup_probe.value.failure_count_threshold\n host = startup_probe.value.host\n interval_seconds = startup_probe.value.interval_seconds\n path = startup_probe.value.path\n timeout = startup_probe.value.timeout\n\n dynamic \"header\" {\n for_each = startup_probe.value.header == null ? [] : [startup_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.name\n }\n }\n }\n }\n\n dynamic \"volume_mounts\" {\n for_each = container.value.volume_mounts == null ? [] : [container.value.volume_mounts]\n\n content {\n name = volume_mounts.value.name\n path = volume_mounts.value.path\n }\n }\n }\n }\n\n dynamic \"volume\" {\n for_each = each.value.template.volume == null ? [] : each.value.template.volume\n\n content {\n name = volume.value.name\n storage_name = volume.value.storage_name\n storage_type = volume.value.storage_type\n }\n }\n }\n\n dynamic \"dapr\" {\n for_each = each.value.dapr == null ? [] : [each.value.dapr]\n\n content {\n app_id = dapr.value.app_id\n app_port = dapr.value.app_port\n app_protocol = dapr.value.app_protocol\n }\n }\n\n dynamic \"identity\" {\n for_each = each.value.identity == null ? [local.identity] : [each.value.identity]\n\n content {\n type = identity.value.type\n identity_ids = identity.value.identity_ids\n }\n }\n\n dynamic \"ingress\" {\n for_each = each.value.ingress == null ? 
[] : [each.value.ingress]\n\n content {\n target_port = ingress.value.target_port\n allow_insecure_connections = ingress.value.allow_insecure_connections\n external_enabled = ingress.value.external_enabled\n transport = ingress.value.transport\n\n dynamic \"traffic_weight\" {\n for_each = ingress.value.traffic_weight == null ? [] : [ingress.value.traffic_weight]\n\n content {\n percentage = traffic_weight.value.percentage\n label = traffic_weight.value.label\n latest_revision = traffic_weight.value.latest_revision\n revision_suffix = traffic_weight.value.revision_suffix\n }\n }\n }\n }\n\n dynamic \"registry\" {\n for_each = each.value.registry == null ? [local.registry] : concat(each.value.registry, [local.registry])\n\n content {\n server = registry.value.server\n identity = registry.value.identity\n }\n }\n\n dynamic \"secret\" {\n for_each = nonsensitive(toset([for pair in lookup(var.container_app_secrets, each.key, []) : pair.name]))\n\n content {\n name = secret.key\n value = local.container_app_secrets[each.key][secret.key]\n }\n }\n}\n \n   \n   \n As you can notice, the module uses the login server of the Azure Container Registry to create the fully qualified name of the container image of the current container app. \n   \n Managed identities in Azure Container Apps \n Each chat application makes use of a DefaultAzureCredential object to acquire a security token from Azure Active Directory and authenticate and authorize with Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) using the credentials of the user-defined managed identity associated with the container app. \n You can use a managed identity in a running container app to authenticate and authorize with any service that supports Azure AD authentication. With managed identities: \n \n Container apps and applications connect to resources with the managed identity. You don't need to manage credentials in your container apps. \n You can use role-based access control to grant specific permissions to a managed identity. \n System-assigned identities are automatically created and managed. They are deleted when your container app or container app is deleted. \n You can add and delete user-assigned identities and assign them to multiple resources. They are independent of your container app or the container app's lifecycle. \n You can use managed identity to authenticate with a private Azure Container Registry without a username and password to pull containers for your Container App. \n You can use managed identity to create connections for Dapr-enabled applications via Dapr components \n \n For more information, see Managed identities in Azure Container Apps. The workloads running in a container app can use the Azure Identity client libraries to acquire a security token from the Azure Active Directory. You can choose one of the following approaches inside your code: \n \n Use  DefaultAzureCredential , which will attempt to use the  WorkloadIdentityCredential . \n Create a  ChainedTokenCredential  instance that includes  WorkloadIdentityCredential . \n Use  WorkloadIdentityCredential  directly. \n \n The following table provides the minimum package version required for each language's client library. 
\n   \n \n \n \n Language \n Library \n Minimum Version \n Example \n \n \n \n \n .NET \n Azure.Identity \n 1.9.0 \n Link \n \n \n Go \n azidentity \n 1.3.0 \n Link \n \n \n Java \n azure-identity \n 1.9.0 \n Link \n \n \n JavaScript \n @azure/identity \n 3.2.0 \n Link \n \n \n Python \n azure-identity \n 1.13.0 \n Link \n \n \n \n   \n NOTE: When using Azure Identity client library with Azure Container Apps, the client ID of the managed identity must be specified. When using the  DefaultAzureCredential , you can explicitly specify the client ID of the container app managed identity in the  AZURE_CLIENT_ID  environment variable. \n   \n Simple Chat Application \n The Simple Chat Application is a large language model-based chatbot that allows users to submit general-purpose questions to a GPT model, which generates and streams back human-like and engaging conversational responses. The following picture shows the welcome screen of the chat application. \n \n   \n You can modify the welcome screen in markdown by editing the  chainlit.md  file at the project's root. If you do not want a welcome screen, leave the file empty. The following picture shows what happens when a user submits a new message in the chat. \n \n   \n Chainlit can render messages in markdown format as shown by the following prompt: \n \n   \n Chainlit also provides classes to support the following elements: \n   \n \n Audio: The  Audio  class allows you to display an audio player for a specific audio file in the chatbot user interface. You must provide either a URL or a path or content bytes. \n Avatar: The  Avatar  class allows you to display an avatar image next to a message instead of the author's name. You need to send the element once. Next,, if an avatar's name matches an author's name, the avatar will be automatically displayed. You must provide either a URL or a path or content bytes. \n File: The  File  class allows you to display a button that lets users download the content of the file. You must provide either a URL or a path or content bytes. \n Image: The  Image  class is designed to create and handle image elements to be sent and displayed in the chatbot user interface. You must provide either a URL or a path or content bytes. \n Pdf: The  Pdf  class allows you to display a PDF hosted remotely or locally in the chatbot UI. This class either takes a URL of a PDF hosted online or the path of a local PDF. \n Pyplot: The  Pyplot  class allows you to display a Matplotlib pyplot chart in the chatbot UI. This class takes a pyplot figure. \n TaskList: The  TaskList  class allows you to display a task list next to the chatbot UI. \n Text: The  Text  class allows you to display a text element in the chatbot UI. This class takes a string and creates a text element that can be sent to the UI. It supports the markdown syntax for formatting text. You must provide either a URL or a path or content bytes. \n \n   \n You can click the user icon on the UI to access the chat settings and choose, for example, between the light and dark theme. \n \n   \n The application is built in Python. Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules. 
\n   \n   \n # Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\") \n   \n   \n These are the libraries used by the chat application: \n   \n \n os : This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc. \n sys : This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter. \n openai : The OpenAI Python library provides convenient access to the OpenAI API from applications written in Python. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook. \n logging : This module provides flexible logging of messages. \n chainlit as cl : This imports the Chainlit library and aliases it as  cl . Chainlit is used to create the UI of the application. \n from azure.identity import DefaultAzureCredential, get_bearer_token_provider : when the  openai_type  property value is  azure_ad,  a  DefaultAzureCredential  object from the Azure Identity client library for Python is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity federated with the service account. \n load_dotenv  and  dotenv_values  from  dotenv : Python-dotenv reads key-value pairs from a  .env  file and can set them as environment variables. It helps in the development of applications following the 12-factor principles. \n \n   \n The  requirements.txt  file under the  src  folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command: \n \n pip install -r requirements.txt --upgrade \n \n Next, the code reads the value of the environment variables used to initialize Azure OpenAI objects. In addition, it creates a token provider for Azure OpenAI. \n \n # Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n) \n \n Here's a brief explanation of each variable and related environment variable: \n   \n \n temperature : A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9. 
\n api_base : The base URL for the OpenAI API. \n api_key : The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI. \n api_type : A string representing the type of the OpenAI API. \n api_version : A string representing the version of the OpenAI API. \n engine : The engine used for OpenAI API calls. \n model : The model used for OpenAI API calls. \n system_content : The content of the system message used for OpenAI API calls. \n max_retries : The maximum number of retries for OpenAI API calls. \n timeout : The timeout in seconds. \n debug : When debug is equal to  true ,  t , or  1 , the logger writes the chat completion answers. \n \n   \n In the next section, the code creates the  AsyncAzureOpenAI  client object used by the application to communicate with the Azure OpenAI Service instance. When the  api_type  is equal to  azure , the code initializes the object with the API key. Otherwise, it initializes the  azure_ad_token_provider  property to the token provider created earlier. Then the code creates a logger. \n \n # Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__) \n \n The backoff time is calculated using the  backoff_in_seconds  and  attempt  variables. It follows the formula  backoff_in_seconds * 2 ** attempt + random.uniform(0, 1) . This formula increases the backoff time exponentially with each attempt and adds a random value between 0 and 1 to avoid synchronized retries. \n Next, the code defines a function called  start_chat  that is used to initialize the UI when the user connects to the application or clicks the  New Chat  button. \n   \n   \n .on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"User\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n \n   \n   \n Here is a brief explanation of the function steps: \n   \n \n cl.on_chat_start : The on_chat_start decorator registers a callback function  start_chat()  to be called when the Chainlit chat starts. It is used to set up the chat and send avatars for the Chatbot, Error, and User participants in the chat. \n cl.Avatar() : the Avatar class allows you to display an avatar image next to a message instead of the author name. You need to send the element once. Next if the name of an avatar matches the name of an author, the avatar will be automatically displayed. You must provide either a URL or a path or content bytes. \n cl.user_session.set() : This API call sets a value in the user_session dictionary. 
In this case, it initializes the message_history in the user's session with a system content message, which indicates the start of the chat.

Finally, the application defines the method called whenever the user sends a new message in the chat.

@cl.on_message
async def on_message(message: cl.Message):
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": message.content})
    logger.info("Question: [%s]", message.content)

    # Create the Chainlit response message
    msg = cl.Message(content="")

    async for stream_resp in await openai.chat.completions.create(
        model=model,
        messages=message_history,
        temperature=temperature,
        stream=True,
    ):
        if stream_resp and len(stream_resp.choices) > 0:
            token = stream_resp.choices[0].delta.content or ""
            await msg.stream_token(token)

    if debug:
        logger.info("Answer: [%s]", msg.content)

    message_history.append({"role": "assistant", "content": msg.content})
    await msg.send()

Here is a detailed explanation of the function steps:

cl.on_message: The on_message decorator registers a callback function on_message(message: cl.Message) to be called when the user submits a new message in the chat. It is the main function responsible for handling the chat logic.
cl.user_session.get(): This API call retrieves a value from the user's session data stored in the user_session dictionary. In this case, it fetches the message_history from the user's session to maintain the chat history.
message_history.append(): This API call appends a new message to the message_history list. It is used to add the user's message and the assistant's response to the chat history.
cl.Message(): This API call creates a Chainlit Message object. The Message class is designed to send, stream, edit, or remove messages in the chatbot user interface. In this sample, the Message object is used to stream the OpenAI response in the chat.
msg.stream_token(): The stream_token method of the Message class streams a token to the response message. It is used to send the response from the OpenAI Chat API in chunks to ensure real-time streaming in the chat.
await openai.chat.completions.create(): This API call sends a message to the OpenAI Chat API in asynchronous mode and streams the response. It uses the provided message_history as context for generating the assistant's response.

Below, you can read the complete code of the application.
\n \n # Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n)\n\n# Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\n@cl.on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n\n\n@cl.on_message\nasync def on_message(message: cl.Message):\n message_history = cl.user_session.get(\"message_history\")\n message_history.append({\"role\": \"user\", \"content\": message.content})\n logger.info(\"Question: [%s]\", message.content)\n\n # Create the Chainlit response message\n msg = cl.Message(content=\"\")\n\n async for stream_resp in await openai.chat.completions.create(\n model=model,\n messages=message_history,\n temperature=temperature,\n stream=True,\n ):\n if stream_resp and len(stream_resp.choices) > 0:\n token = stream_resp.choices[0].delta.content or \"\"\n await msg.stream_token(token)\n\n if debug:\n logger.info(\"Answer: [%s]\", msg.content)\n\n message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n await msg.send() \n \n You can run the application locally using the following command. The  -w  flag` indicates auto-reload whenever we make changes live in our application code. \n \n chainlit run app.py -w \n \n Documents QA Chat \n The Documents QA Chat application allows users to submit up to 10  .pdf  and  .docx  documents. 
The application processes the uploaded documents to create vector embeddings. These embeddings are stored in ChromaDB vector database for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response. The following picture shows the chat application interface. As you can see, you can click the  Browse  button and choose up to 10  .pdf  and  .docx  documents to upload. Alternatively, you can just drag and drop the files over the control area. \n \n   \n After uploading the documents, the application creates and stores embeddings to ChromaDB vector database. During the phase, the UI shows a message  Processing <file-1>, <file-2>... , as shown in the following picture: \n \n   \n When the code finished creating embeddings, the UI is ready to receive user's questions: \n \n   \n As your chat application grows in complexity, understanding the individual steps for generating a specific answer can become challenging. To solve this issue, Chainlit allows you to easily explore the reasoning process right from the user interface using the Chain of Thought. If you are using the LangChain integration, every intermediary step is automatically sent and displayed in the Chainlit UI just clicking and expanding the steps, as shown in the following picture: \n \n   \n To see the text chunks that were used by the large language model to originate the response, you can click the sources links, as shown in the following picture: \n \n   \n In the Chain of Thought, below the step used to invoke the OpenAI chat completion API, you can find an \n  Inspect in prompt playground  icon. Clicking on it opens the Prompt Playground dialog which allows you to modify and iterate on the prompt as needed. \n \n   \n As shown in the following picture, you can click and edit the value of the highlighted variables in the user prompt: \n \n   \n You can then click and edit the user question. \n \n   \n Then, you can click the submit button to test the effect of your changes, as shown in the following picture. \n \n   \n Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules. \n \n # Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\") \n \n These are the libraries used by the chat application: \n   \n \n os : This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc. 
\n sys : This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter. \n time : This module provides various time-related functions for time manipulation and measurement. \n openai : the OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses, which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook. \n logging : This module provides flexible logging of messages. \n chainlit as cl : This imports the Chainlit library and aliases it as  cl.  Chainlit is used to create the UI of the application. \n AzureChatOpenAI  from  chainlit.playground.config import : you need to import  AzureChatOpenAI  from  chainlit.playground.config  to use the Chainlit Playground. \n DefaultAzureCredential  from  azure.identity : when the  openai_type  property value is  azure_ad , a  DefaultAzureCredential  object from the Azure Identity client library for Python - version 1.13.0 is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity, whose client ID is defined in the  AZURE_CLIENT_ID  environment variable. \n load_dotenv  and  dotenv_values  from  dotenv : Python-dotenv reads key-value pairs from a  .env  file and can set them as environment variables. It helps in the development of applications following the 12-factor principles. \n langchain : Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge. LangChain library aims to assist in the development of those types of applications. \n \n The  requirements.txt  file under the  src  folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command: \n \n pip install -r requirements.txt --upgrade \n \n Next, the code reads environment variables and configures the OpenAI settings. 
\n \n # Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n\\`\\`\\`\nThe answer is foo\nSOURCES: xyz\n\\`\\`\\`\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n SystemMessagePromptTemplate.from_template(system_template),\n HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = chat_completion_deployment \n \n Here's a brief explanation of each variable and related environment variable: \n   \n \n temperature : A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9. \n api_base : The base URL for the OpenAI API. \n api_key : The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI. \n api_type : A string representing the type of the OpenAI API. \n api_version : A string representing the version of the OpenAI API. \n chat_completion_deployment : the name of the Azure OpenAI GPT model for chat completion. \n embeddings_deployment : the name of the Azure OpenAI deployment for embeddings. \n model : The model used for chat completion calls (e.g,  gpt-35-turbo-16k ). 
max_size_mb: the maximum size, in MB, for the uploaded documents.
max_files: the maximum number of documents that can be uploaded.
text_splitter_chunk_size: the maximum chunk size used by the RecursiveCharacterTextSplitter object.
text_splitter_chunk_overlap: the maximum chunk overlap used by the RecursiveCharacterTextSplitter object.
embeddings_chunk_size: the maximum chunk size used by the OpenAIEmbeddings object.
max_retries: the maximum number of retries for OpenAI API calls.
retry_min_seconds: the minimum number of seconds before a retry.
retry_max_seconds: the maximum number of seconds before a retry.
timeout: the timeout in seconds.
system_template: the content of the system message used for OpenAI API calls.
debug: when debug is equal to true, t, or 1, the logger switches to verbose mode.

Next, the code defines a function called start_chat that is used to initialize the chat when the user connects to the application or clicks the New Chat button.

@cl.on_chat_start
async def start_chat():
    # Sending Avatars for Chat Participants
    await cl.Avatar(
        name="Chatbot",
        url="https://cdn-icons-png.flaticon.com/512/8649/8649595.png"
    ).send()
    await cl.Avatar(
        name="Error",
        url="https://cdn-icons-png.flaticon.com/512/8649/8649595.png"
    ).send()
    await cl.Avatar(
        name="You",
        url="https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png"
    ).send()

Here is a brief explanation of the function steps:

cl.on_chat_start: the on_chat_start decorator registers a callback function, start_chat(), to be called when the Chainlit chat starts. It is used to set up the chat and send avatars for the Chatbot, Error, and You participants in the chat.
cl.Avatar(): the Avatar class allows you to display an avatar image next to a message instead of the author name. You need to send the element once. Then, if the name of an avatar matches the name of an author, the avatar is automatically displayed. You must provide either a URL, a path, or content bytes.

The following code is used to initialize the large language model (LLM) chain used to reply to questions on the content of the uploaded documents.

    # Initialize the file list to None
    files = None

    # Wait for the user to upload a file
    while files == None:
        files = await cl.AskFileMessage(
            content=f"Please upload up to {max_files} `.pdf` or `.docx` files to begin.",
            accept=[
                "application/pdf",
                "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            ],
            max_size_mb=max_size_mb,
            max_files=max_files,
            timeout=86400,
            raise_on_timeout=False,
        ).send()

The AskFileMessage API call prompts the user to upload up to a specified number of .pdf or .docx files. The uploaded files are stored in the files variable. The process continues until the user uploads files. For more information, see AskFileMessage.
The following code processes each uploaded file by extracting its content.

The text content of each file is stored in the list all_texts.
This code performs text processing and chunking. It checks the file extension to read the file content accordingly, depending on whether it's a .pdf or a .docx document.
The text content is split into smaller chunks using the RecursiveCharacterTextSplitter LangChain object.
Metadata is created for each chunk and stored in the metadatas list.
    # Create a message to inform the user that the files are being processed
    content = ""
    if len(files) == 1:
        content = f"Processing `{files[0].name}`..."
    else:
        files_names = [f"`{f.name}`" for f in files]
        content = f"Processing {', '.join(files_names)}..."
    logger.info(content)
    msg = cl.Message(content=content, author="Chatbot")
    await msg.send()

    # Create a list to store the texts of each file
    all_texts = []

    # Process each file uploaded by the user
    for file in files:
        # Read file contents
        with open(file.path, "rb") as uploaded_file:
            file_contents = uploaded_file.read()

        logger.info("[%d] bytes were read from %s", len(file_contents), file.path)

        # Create an in-memory buffer from the file content
        bytes = io.BytesIO(file_contents)

        # Get file extension
        extension = file.name.split(".")[-1]

        # Initialize the text variable
        text = ""

        # Read the file
        if extension == "pdf":
            reader = PdfReader(bytes)
            for i in range(len(reader.pages)):
                text += reader.pages[i].extract_text()
                if debug:
                    logger.info("[%s] read from %s", text, file.path)
        elif extension == "docx":
            doc = Document(bytes)
            paragraph_list = []
            for paragraph in doc.paragraphs:
                paragraph_list.append(paragraph.text)
                if debug:
                    logger.info("[%s] read from %s", paragraph.text, file.path)
            text = "\n".join(paragraph_list)

        # Split the text into chunks
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=text_splitter_chunk_size,
            chunk_overlap=text_splitter_chunk_overlap,
        )
        texts = text_splitter.split_text(text)

        # Add the chunks and metadata to the list
        all_texts.extend(texts)

    # Create metadata for each chunk
    metadatas = [{"source": f"{i}-pl"} for i in range(len(all_texts))]

The next piece of code performs the following steps:

It creates an AzureOpenAIEmbeddings object configured to use the embeddings model in the Azure OpenAI Service to create embeddings from the text chunks.
It creates a ChromaDB vector database using the AzureOpenAIEmbeddings object, the text chunks list, and the metadata list.
It creates an AzureChatOpenAI LangChain object based on the GPT model hosted in Azure OpenAI Service.
It creates a chain using the RetrievalQAWithSourcesChain.from_chain_type API call, which uses the previously created LLM and the Chroma vector database as a retriever.
It stores the metadata and text chunks in the user session using the cl.user_session.set() API call.
It creates a message to inform the user that the files are ready for queries, and finally stores the chain in the user session.
The cl.user_session.set("chain", chain) call stores the LLM chain in the user_session dictionary for later use.

The next section creates the LangChain LLM chain.
\n \n # Create a Chroma vector store\n if api_type == \"azure\":\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n else:\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n\n # Create a Chroma vector store\n db = await cl.make_async(Chroma.from_texts)(\n all_texts, embeddings, metadatas=metadatas\n )\n\n # Create an AzureChatOpenAI llm\n if api_type == \"azure\":\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n else:\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n azure_endpoint=api_base,\n api_key=api_key,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n azure_ad_token_provider=token_provider,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n # Create a chain that uses the Chroma vector store\n chain = RetrievalQAWithSourcesChain.from_chain_type(\n llm=llm,\n chain_type=\"stuff\",\n retriever=db.as_retriever(),\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs,\n )\n\n # Save the metadata and texts in the user session\n cl.user_session.set(\"metadatas\", metadatas)\n cl.user_session.set(\"texts\", all_texts)\n\n # Create a message to inform the user that the files are ready for queries\n content = \"\"\n if len(files) == 1:\n content = f\"`{files[0].name}` processed. You can now ask questions!\"\n logger.info(content)\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"{', '.join(files_names)} processed. You can now ask questions.\"\n logger.info(content)\n msg.content = content\n msg.author = \"Chatbot\"\n await msg.update()\n\n # Store the chain in the user session\n cl.user_session.set(\"chain\", chain) \n \n The following code handles the communication with the OpenAI API and incorporates retrying logic in case the API calls fail due to specific errors. \n   \n \n cl.on_message : The on_message decorator registers a callback function  main(message: str)  to be called when the user submits a new message in the chat. It is the main function responsible for handling the chat logic. \n cl.user_session.get(\"chain\") : this call retrieves the LLM chain from the user_session dictionary. \n cl.AsyncLangchainCallbackHandler : this call creates a LangChain callback handler. \n await chain.acall : The asynchronous call to the RetrievalQAWithSourcesChain.acall executes the LLM chain with the user message as an input. 
\n \n \n @cl.on_message\nasync def main(message: cl.Message):\n # Retrieve the chain from the user session\n chain = cl.user_session.get(\"chain\")\n\n # Create a callback handler\n cb = cl.AsyncLangchainCallbackHandler()\n\n # Get the response from the chain\n response = await chain.acall(message.content, callbacks=[cb])\n logger.info(\"Question: [%s]\", message.content) \n \n The code below extracts the answers and sources from the API response and formats them to be sent as a message. \n \n The  answer  and  sources  are obtained from the  response  dictionary. \n The sources are then processed to find corresponding texts in the user session metadata ( metadatas ) and create  source_elements  using  cl.Text() . \n cl.Message().send() : the Message API creates and displays a message containing the answer and sources, if available. \n The last command sets the  AZURE_OPENAI_API_KEY  environment variable to a security key to access Azure OpenAI returned by the token provider. This key is used by the Chainlit playground. \n \n   \n   \n # Get the answer and sources from the response\n answer = response[\"answer\"]\n sources = response[\"sources\"].strip()\n source_elements = []\n\n if debug:\n logger.info(\"Answer: [%s]\", answer)\n\n # Get the metadata and texts from the user session\n metadatas = cl.user_session.get(\"metadatas\")\n all_sources = [m[\"source\"] for m in metadatas]\n texts = cl.user_session.get(\"texts\")\n\n if sources:\n found_sources = []\n\n # Add the sources to the message\n for source in sources.split(\",\"):\n source_name = source.strip().replace(\".\", \"\")\n # Get the index of the source\n try:\n index = all_sources.index(source_name)\n except ValueError:\n continue\n text = texts[index]\n found_sources.append(source_name)\n # Create the text element referenced in the message\n source_elements.append(cl.Text(content=text, name=source_name))\n\n if found_sources:\n answer += f\"\\nSources: {', '.join(found_sources)}\"\n else:\n answer += \"\\nNo sources found\"\n\n await cl.Message(content=answer, elements=source_elements).send()\n\n # Setting the AZURE_OPENAI_API_KEY environment variable for the playground\n if api_type == \"azure_ad\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = token_provider()\n \n   \n   \n   \n   \n Below, you can read the complete code of the application. 
\n \n # Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n```\nThe answer is foo\nSOURCES: xyz\n```\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n SystemMessagePromptTemplate.from_template(system_template),\n HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = 
chat_completion_deployment\n\n\n@cl.on_chat_start\nasync def start():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n\n # Initialize the file list to None\n files = None\n\n # Wait for the user to upload a file\n while files == None:\n files = await cl.AskFileMessage(\n content=f\"Please upload up to {max_files} `.pdf` or `.docx` files to begin.\",\n accept=[\n \"application/pdf\",\n \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n ],\n max_size_mb=max_size_mb,\n max_files=max_files,\n timeout=86400,\n raise_on_timeout=False,\n ).send()\n\n # Create a message to inform the user that the files are being processed\n content = \"\"\n if len(files) == 1:\n content = f\"Processing `{files[0].name}`...\"\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"Processing {', '.join(files_names)}...\"\n logger.info(content)\n msg = cl.Message(content=content, author=\"Chatbot\")\n await msg.send()\n\n # Create a list to store the texts of each file\n all_texts = []\n\n # Process each file uplodaded by the user\n for file in files:\n # Read file contents\n with open(file.path, \"rb\") as uploaded_file:\n file_contents = uploaded_file.read()\n\n logger.info(\"[%d] bytes were read from %s\", len(file_contents), file.path)\n\n # Create an in-memory buffer from the file content\n bytes = io.BytesIO(file_contents)\n\n # Get file extension\n extension = file.name.split(\".\")[-1]\n\n # Initialize the text variable\n text = \"\"\n\n # Read the file\n if extension == \"pdf\":\n reader = PdfReader(bytes)\n for i in range(len(reader.pages)):\n text += reader.pages[i].extract_text()\n if debug:\n logger.info(\"[%s] read from %s\", text, file.path)\n elif extension == \"docx\":\n doc = Document(bytes)\n paragraph_list = []\n for paragraph in doc.paragraphs:\n paragraph_list.append(paragraph.text)\n if debug:\n logger.info(\"[%s] read from %s\", paragraph.text, file.path)\n text = \"\\n\".join(paragraph_list)\n\n # Split the text into chunks\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size=text_splitter_chunk_size,\n chunk_overlap=text_splitter_chunk_overlap,\n )\n texts = text_splitter.split_text(text)\n\n # Add the chunks and metadata to the list\n all_texts.extend(texts)\n\n # Create a metadata for each chunk\n metadatas = [{\"source\": f\"{i}-pl\"} for i in range(len(all_texts))]\n\n # Create a Chroma vector store\n if api_type == \"azure\":\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n else:\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n\n # Create a Chroma vector store\n db = 
await cl.make_async(Chroma.from_texts)(\n all_texts, embeddings, metadatas=metadatas\n )\n\n # Create an AzureChatOpenAI llm\n if api_type == \"azure\":\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n else:\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n azure_endpoint=api_base,\n api_key=api_key,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n azure_ad_token_provider=token_provider,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n # Create a chain that uses the Chroma vector store\n chain = RetrievalQAWithSourcesChain.from_chain_type(\n llm=llm,\n chain_type=\"stuff\",\n retriever=db.as_retriever(),\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs,\n )\n\n # Save the metadata and texts in the user session\n cl.user_session.set(\"metadatas\", metadatas)\n cl.user_session.set(\"texts\", all_texts)\n\n # Create a message to inform the user that the files are ready for queries\n content = \"\"\n if len(files) == 1:\n content = f\"`{files[0].name}` processed. You can now ask questions!\"\n logger.info(content)\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"{', '.join(files_names)} processed. You can now ask questions.\"\n logger.info(content)\n msg.content = content\n msg.author = \"Chatbot\"\n await msg.update()\n\n # Store the chain in the user session\n cl.user_session.set(\"chain\", chain)\n\n\n@cl.on_message\nasync def main(message: cl.Message):\n # Retrieve the chain from the user session\n chain = cl.user_session.get(\"chain\")\n\n # Create a callback handler\n cb = cl.AsyncLangchainCallbackHandler()\n\n # Get the response from the chain\n response = await chain.acall(message.content, callbacks=[cb])\n logger.info(\"Question: [%s]\", message.content)\n\n # Get the answer and sources from the response\n answer = response[\"answer\"]\n sources = response[\"sources\"].strip()\n source_elements = []\n\n if debug:\n logger.info(\"Answer: [%s]\", answer)\n\n # Get the metadata and texts from the user session\n metadatas = cl.user_session.get(\"metadatas\")\n all_sources = [m[\"source\"] for m in metadatas]\n texts = cl.user_session.get(\"texts\")\n\n if sources:\n found_sources = []\n\n # Add the sources to the message\n for source in sources.split(\",\"):\n source_name = source.strip().replace(\".\", \"\")\n # Get the index of the source\n try:\n index = all_sources.index(source_name)\n except ValueError:\n continue\n text = texts[index]\n found_sources.append(source_name)\n # Create the text element referenced in the message\n source_elements.append(cl.Text(content=text, name=source_name))\n\n if found_sources:\n answer += f\"\\nSources: {', '.join(found_sources)}\"\n else:\n answer += \"\\nNo sources found\"\n\n await cl.Message(content=answer, elements=source_elements).send()\n\n # Setting the AZURE_OPENAI_API_KEY environment variable for the playground\n if api_type == \"azure_ad\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = token_provider() \n \n You can run the application locally using the following command. The  -w  flag` indicates auto-reload whenever we make changes live in our application code. 
\n \n chainlit run app.py -w \n \n Build Docker Images \n You can use the   src/01-build-docker-images.sh  Bash script to build the Docker container image for each container app. \n   \n   \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Use a for loop to build the docker images using the array index\nfor index in ${!images[@]}; do\n # Build the docker image\n docker build -t ${images[$index]}:$tag -f Dockerfile --build-arg FILENAME=${filenames[$index]} --build-arg PORT=$port .\ndone \n   \n   \n   \n   \n Before running any script in the  src  folder, make sure to customize the value of the variables inside the  00-variables.sh  file located in the same folder. This file is embedded in all the scripts and contains the following variables: \n   \n   \n # Variables\n\n# Azure Container Registry\nprefix=\"Blue\"\nacrName=\"${prefix}Registry\"\nacrResourceGrougName=\"${prefix}RG\"\nlocation=\"EastUS\"\n\n# Python Files\ndocAppFile=\"doc.py\"\nchatAppFile=\"chat.py\"\n\n# Docker Images\ndocImageName=\"doc\"\nchatImageName=\"chat\"\ntag=\"v1\"\nport=\"8000\"\n\n# Arrays\nimages=($docImageName $chatImageName)\nfilenames=($docAppFile $chatAppFile) \n   \n   \n The  Dockerfile  under the  src  folder is parametric and can be used to build the container images for both chat applications. \n \n # app/Dockerfile\n\n# # Stage 1 - Install build dependencies\n\n# A Dockerfile must start with a FROM instruction that sets the base image for the container.\n# The Python images come in many flavors, each designed for a specific use case.\n# The python:3.11-slim image is a good base image for most applications.\n# It is a minimal image built on top of Debian Linux and includes only the necessary packages to run Python.\n# The slim image is a good choice because it is small and contains only the packages needed to run Python.\n# For more information, see: \n# * https://hub.docker.com/_/python \n# * https://docs.streamlit.io/knowledge-base/tutorials/deploy/docker\nFROM python:3.11-slim AS builder\n\n# The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile.\n# If the WORKDIR doesn’t exist, it will be created even if it’s not used in any subsequent Dockerfile instruction.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#workdir\nWORKDIR /app\n\n# Set environment variables. \n# The ENV instruction sets the environment variable <key> to the value <value>.\n# This value will be in the environment of all “descendant” Dockerfile commands and can be replaced inline in many as well.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#env\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\n\n# Install git so that we can clone the app code from a remote repo using the RUN instruction.\n# The RUN comand has 2 forms:\n# * RUN <command> (shell form, the command is run in a shell, which by default is /bin/sh -c on Linux or cmd /S /C on Windows)\n# * RUN [\"executable\", \"param1\", \"param2\"] (exec form)\n# The RUN instruction will execute any commands in a new layer on top of the current image and commit the results. 
# The resulting committed image will be used for the next step in the Dockerfile.
# For more information, see: https://docs.docker.com/engine/reference/builder/#run
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    software-properties-common \
    git \
    && rm -rf /var/lib/apt/lists/*

# Create a virtualenv to keep dependencies together
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Copy the requirements.txt file, which contains the dependencies, to WORKDIR
# COPY has two forms:
# * COPY <src> <dest> (this copies the files from the local machine to the container's own filesystem)
# * COPY ["<src>",... "<dest>"] (this form is required for paths containing whitespace)
# For more information, see: https://docs.docker.com/engine/reference/builder/#copy
COPY requirements.txt .

# Install the Python dependencies
RUN pip install --no-cache-dir --no-deps -r requirements.txt

# Stage 2 - Copy only necessary files to the runner stage

# The FROM instruction initializes a new build stage for the application
FROM python:3.11-slim

# Define the filename to copy as an argument
ARG FILENAME

# Define the port to run the application on as an argument
ARG PORT=8000

# Set an environment variable
ENV FILENAME=${FILENAME}

# Sets the working directory to /app
WORKDIR /app

# Copy the virtual environment from the builder stage
COPY --from=builder /opt/venv /opt/venv

# Set environment variables
ENV PATH="/opt/venv/bin:$PATH"

# Copy the $FILENAME file containing the application code
COPY $FILENAME .

# Copy the chainlit.md file to the working directory
COPY chainlit.md .

# Copy the .chainlit folder to the working directory
COPY ./.chainlit ./.chainlit

# The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime.
# For more information, see: https://docs.docker.com/engine/reference/builder/#expose
EXPOSE $PORT

# The ENTRYPOINT instruction has two forms:
# * ENTRYPOINT ["executable", "param1", "param2"] (exec form, preferred)
# * ENTRYPOINT command param1 param2 (shell form)
# The ENTRYPOINT instruction allows you to configure a container that will run as an executable.
# For more information, see: https://docs.docker.com/engine/reference/builder/#entrypoint
CMD chainlit run $FILENAME --port=$PORT

Test applications locally
You can use the src/02-run-docker-container.sh Bash script to test the containers for the doc and chat applications.
#!/bin/bash

# Variables
source ./00-variables.sh

# Print the menu
echo "===================================="
echo "Run Docker Container (1-3): "
echo "===================================="
options=(
  "Doc"
  "Chat"
  "Quit"
)
name=""
# Select an option
COLUMNS=0
select option in "${options[@]}"; do
  case $option in
    "Doc")
      docker run -it \
        --rm \
        -p $port:$port \
        -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \
        -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \
        -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \
        -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \
        -e AZURE_OPENAI_ADA_DEPLOYMENT=$AZURE_OPENAI_ADA_DEPLOYMENT \
        -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \
        -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \
        -e TEMPERATURE=$TEMPERATURE \
        --name $docImageName \
        $docImageName:$tag
      break
      ;;
    "Chat")
      docker run -it \
        --rm \
        -p $port:$port \
        -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \
        -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \
        -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \
        -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \
        -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \
        -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \
        -e TEMPERATURE=$TEMPERATURE \
        --name $chatImageName \
        $chatImageName:$tag
      break
      ;;
    "Quit")
      exit
      ;;
    *) echo "invalid option $REPLY" ;;
  esac
done

Push Docker containers to the Azure Container Registry
You can use the src/03-push-docker-image.sh Bash script to push the Docker container images for the doc and chat applications to the Azure Container Registry (ACR).

#!/bin/bash

# Variables
source ./00-variables.sh

# Login to ACR
echo "Logging in to [${acrName,,}] container registry..."
az acr login --name ${acrName,,}

# Retrieve ACR login server. Each container image needs to be tagged with the loginServer name of the registry.
echo "Retrieving login server for the [${acrName,,}] container registry..."
loginServer=$(az acr show --name ${acrName,,} --query loginServer --output tsv)

# Use a for loop to tag and push the local docker images to the Azure Container Registry
for index in ${!images[@]}; do
  # Tag the local image with the loginServer of ACR
  docker tag ${images[$index],,}:$tag $loginServer/${images[$index],,}:$tag

  # Push the container image to ACR
  docker push $loginServer/${images[$index],,}:$tag
done

Monitoring
Azure Container Apps provides several built-in observability features that together give you a holistic view of your container app's health throughout its application lifecycle. These features help you monitor and diagnose the state of your app to improve performance and respond to trends and critical problems.
You can use the Log Stream panel in the Azure Portal to see the logs generated by a container app, as shown in the following screenshot.

Alternatively, you can open the Logs panel, as shown in the following screenshot, and use a Kusto Query Language (KQL) query to filter, project, and retrieve only the desired data.
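If you prefer working from the command line, you can also stream the console logs of a container app with the Azure CLI. This is only a sketch: it assumes the containerapp CLI extension is installed, and the container app and resource group names are placeholders that depend on how you customized the Bicep parameters.

# Stream the console logs of a container app (placeholder names)
az containerapp logs show \
  --name <container-app-name> \
  --resource-group <resource-group-name> \
  --type console \
  --follow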
Review deployed resources
You can use the Azure portal to list the deployed resources in the resource group, as shown in the following picture:

You can also use Azure CLI to list the deployed resources in the resource group:

az resource list --resource-group <resource-group-name>

You can also use the following PowerShell cmdlet to list the deployed resources in the resource group:

Get-AzResource -ResourceGroupName <resource-group-name>

Clean up resources
You can delete the resource group using the following Azure CLI command when you no longer need the resources you created. This will remove all the Azure resources.

az group delete --name <resource-group-name>

Alternatively, you can use the following PowerShell cmdlet to delete the resource group and all the Azure resources.

Remove-AzResourceGroup -Name <resource-group-name>
Deploying an Azure Kubernetes Service (AKS) Cluster with Application Gateway for Containers
The Application Gateway for Containers is a new cutting-edge Azure service that offers load balancing and dynamic traffic management for applications running in a Kubernetes cluster. As part of Azure's Application Load Balancing portfolio, this innovative product provides an enhanced experience for developers and administrators. The Application Gateway for Containers represents the evolution of the Application Gateway Ingress Controller (AGIC) and enables Azure Kubernetes Service (AKS) customers to leverage Azure's native Application Gateway load balancer. In this article, we will guide you through the process of deploying an Azure Kubernetes Service (AKS) cluster with an Application Gateway for Containers in a fully automated fashion, using either a bring your own (BYO) or managed by ALB deployment.
For more information, see:

What is Application Gateway for Containers?
Application Gateway for Containers components
Quickstart: Deploy Application Gateway for Containers ALB Controller
Quickstart: Create Application Gateway for Containers - Bring your own deployment
Quickstart: Create Application Gateway for Containers managed by ALB Controller
Advanced load balancing scenarios with the new Azure Application Gateway for Containers

Bicep templates, companion code, Grafana dashboards, and Visio diagrams are in this GitHub repository.
Prerequisites

An active Azure subscription.
If you don't have one, create a free Azure account before you begin.
Visual Studio Code installed on one of the supported platforms along with the Bicep extension.
Azure CLI version 2.50.0 or later installed. To install or upgrade, see Install Azure CLI.
aks-preview Azure CLI extension version 0.5.145 or later installed.

You can run az --version to verify the versions above.
To install the aks-preview extension, run the following command:

az extension add --name aks-preview

Run the following command to update to the latest released version of the extension:

az extension update --name aks-preview

Architecture
This sample provides a comprehensive set of Bicep modules that facilitate the deployment of an Azure Kubernetes Service (AKS) cluster with an integrated Application Gateway for Containers. Additionally, it offers modules for the optional deployment of other essential Azure services, including the Azure Monitor managed service for Prometheus resource and an Azure Managed Grafana instance for efficient monitoring of the cluster's performance and overall health status.
The following diagram illustrates the architecture and network topology implemented by this sample.

The Bicep modules are parametric, so you can choose any network plugin. However, Application Gateway for Containers currently only supports Azure CNI with static IP allocation and Azure CNI with dynamic IP allocation. In addition, this sample shows how to deploy an Azure Kubernetes Service cluster with the following extensions and features:

Istio-based service mesh add-on for Azure Kubernetes Service provides an officially supported and tested Istio integration for Azure Kubernetes Service (AKS).
API Server VNET Integration allows you to enable network communication between the API server and the cluster nodes without requiring a private link or tunnel. AKS clusters with API Server VNET integration provide a series of advantages; for example, they can have public network access or private cluster mode enabled or disabled without redeploying the cluster. For more information, see Create an Azure Kubernetes Service cluster with API Server VNet Integration.
Azure NAT Gateway to manage outbound connections initiated by AKS-hosted workloads.
Event-driven Autoscaling (KEDA) add-on is a single-purpose and lightweight component that strives to make application autoscaling simple and is a CNCF Incubation project.
Dapr extension for Azure Kubernetes Service (AKS) allows you to install Dapr, a portable, event-driven runtime that simplifies building resilient, stateless, and stateful applications that run on the cloud and edge and embrace the diversity of languages and developer frameworks. With its sidecar architecture, Dapr helps you tackle the challenges that come with building microservices and keeps your code platform agnostic.
Flux V2 extension allows you to deploy workloads to an Azure Kubernetes Service (AKS) cluster via GitOps. For more information, see GitOps Flux v2 configurations with AKS and Azure Arc-enabled Kubernetes.
Vertical Pod Autoscaling allows you to automatically set resource requests and limits on containers per workload based on past usage. VPA makes certain pods are scheduled onto nodes that have the required CPU and memory resources. For more information, see Kubernetes Vertical Pod Autoscaling.
Azure Key Vault Provider for Secrets Store CSI Driver provides a variety of methods of identity-based access to your Azure Key Vault.
Image Cleaner to clean up stale images on your Azure Kubernetes Service cluster.
Azure Kubernetes Service (AKS) Network Observability is an important part of maintaining a healthy and performant Kubernetes cluster. By collecting and analyzing data about network traffic, you can gain insights into how your cluster is operating and identify potential problems before they cause outages or performance degradation.
Windows Server node pool allows running Windows Server containers on an Azure Kubernetes Service (AKS) cluster. You can disable the deployment of a Windows node pool.

In a production environment, we strongly recommend deploying a private AKS cluster with Uptime SLA. For more information, see private AKS cluster with a Public DNS address. Alternatively, you can deploy a public AKS cluster and secure access to the API server using authorized IP address ranges.

The Bicep modules deploy the following Azure resources:

Microsoft.Network/virtualNetworks: a new virtual network with seven subnets:
SystemSubnet: this subnet is used for the agent nodes of the system node pool.
UserSubnet: this subnet is used for the agent nodes of the user node pool.
PodSubnet: this subnet is used to allocate private IP addresses to pods dynamically.
ApiServerSubnet: API Server VNET Integration projects the API server endpoint directly into this delegated subnet in the virtual network where the AKS cluster is deployed.
AzureBastionSubnet: a subnet for the Azure Bastion Host.
VmSubnet: a subnet for a jump-box virtual machine used to connect to the (private) AKS cluster and for the private endpoints.
AppGwForConSubnet: this subnet contains the proxies created by the Application Load Balancer control plane to handle and distribute the ingress traffic to the AKS-hosted pods.

Microsoft.ServiceNetworking/trafficControllers: an Application Gateway for Containers used as a service proxy to handle load balancing, routing, and TLS termination for AKS-hosted workloads. There are two deployment strategies for the management of Application Gateway for Containers. You can specify the deployment strategy using the applicationGatewayForContainersType parameter in the main.bicep module:
Bring your own (BYO) deployment: If you choose this strategy, the Bicep module creates the Application Gateway for Containers resource in the target deployment resource group. In this case, you are responsible for creating Association and Frontend child resources for the Application Gateway for Containers using the Azure Portal, Bicep, Azure CLI, Terraform, or the Azure REST API. Every time you want to create a new Gateway or an Ingress object in your Azure Kubernetes Service (AKS) cluster, it's your responsibility to provision a Frontend child resource for the Application Gateway for Containers upfront and reference it in the annotations in the Gateway or Ingress object. You are also responsible for deleting any Frontend child resource after deleting a Gateway or Ingress object in Kubernetes.
Managed by ALB Controller: In this deployment strategy, the Application Load Balancer (ALB) Controller, deployed in AKS via a Helm chart by the deployment script, is responsible for the lifecycle of the Application Gateway for Containers resource and its sub-resources.
The ALB Controller creates the Application Gateway for Containers resource in the AKS node resource group when an ApplicationLoadBalancer Kubernetes object is defined on the cluster. Every time you want to create a new Gateway or an Ingress object which references the ApplicationLoadBalancer Kubernetes object in the annotations, the ALB Controller provisions a new Frontend resource and manages its lifecycle based on the lifecycle of the Gateway or Ingress object.

Microsoft.ContainerService/managedClusters: a public or private Azure Kubernetes Service (AKS) cluster composed of:
A system node pool in a dedicated subnet. The default node pool hosts only critical system pods and services. The worker nodes have a node taint which prevents application pods from being scheduled on this node pool.
A user node pool hosting user workloads and artifacts in a dedicated subnet.
A windows node pool hosting Windows Server containers. This node pool is optionally created when the value of the windowsAgentPoolEnabled parameter equals true.

Microsoft.ManagedIdentity/userAssignedIdentities: a user-defined managed identity used by the AKS cluster to create additional resources like load balancers and managed disks in Azure.
Microsoft.Compute/virtualMachines: Bicep modules can optionally create a jump-box virtual machine to manage the private AKS cluster.
Microsoft.Network/bastionHosts: a separate Azure Bastion is deployed in the AKS cluster virtual network to provide SSH connectivity to both agent nodes and virtual machines.
Microsoft.Network/natGateways: a bring-your-own (BYO) Azure NAT Gateway to manage outbound connections initiated by AKS-hosted workloads. The NAT Gateway is associated with the SystemSubnet, UserSubnet, and PodSubnet subnets. The outboundType property of the cluster is set to userAssignedNatGateway to specify that a BYO NAT Gateway is used for outbound connections. NOTE: you can update the outboundType after cluster creation, and this will deploy or remove resources as required to put the cluster into the new egress configuration. For more information, see Updating outboundType after cluster creation.
Microsoft.Storage/storageAccounts: this storage account is used to store the boot diagnostics logs of both the service provider and service consumer virtual machines. Boot Diagnostics is a debugging feature that allows you to view console output and screenshots to diagnose virtual machine status.
Microsoft.ContainerRegistry/registries: an Azure Container Registry (ACR) to build, store, and manage container images and artifacts in a private registry for all container deployments.
Microsoft.KeyVault/vaults: an Azure Key Vault used to store secrets, certificates, and keys that can be mounted as files by pods using Azure Key Vault Provider for Secrets Store CSI Driver. For more information, see Use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster and Provide an identity to access the Azure Key Vault Provider for Secrets Store CSI Driver.
Microsoft.Network/privateEndpoints: an Azure Private Endpoint is created for each of the following resources:
Azure Container Registry
Azure Key Vault
Azure Storage Account
API Server when deploying a private AKS cluster.

Microsoft.Network/privateDnsZones: an Azure Private DNS Zone is created for each of the following resources:
Azure Container Registry
Azure Key Vault
Azure Storage Account
API Server when deploying a private AKS cluster.
Microsoft.Network/networkSecurityGroups: subnets hosting virtual machines and Azure Bastion Hosts are protected by Azure Network Security Groups that are used to filter inbound and outbound traffic.
Microsoft.Monitor/accounts: an Azure Monitor workspace is a unique environment for data collected by Azure Monitor. Each workspace has its own data repository, configuration, and permissions. Log Analytics workspaces contain logs and metrics data from multiple Azure resources, whereas Azure Monitor workspaces currently contain only metrics related to Prometheus. Azure Monitor managed service for Prometheus allows you to collect and analyze metrics at scale using a Prometheus-compatible monitoring solution. This fully managed service allows you to use the Prometheus query language (PromQL) to analyze and alert on the performance of monitored infrastructure and workloads without having to operate the underlying infrastructure. The primary method for visualizing Prometheus metrics is Azure Managed Grafana. You can connect your Azure Monitor workspace to an Azure Managed Grafana instance to visualize Prometheus metrics using a set of built-in and custom Grafana dashboards.
Microsoft.Dashboard/grafana: an Azure Managed Grafana instance used to visualize the Prometheus metrics generated by the Azure Kubernetes Service (AKS) cluster deployed by the Bicep modules. Azure Managed Grafana is a fully managed service for analytics and monitoring solutions. It's supported by Grafana Enterprise, which provides extensible data visualizations. This managed service allows you to quickly and easily deploy Grafana dashboards with built-in high availability and to control access with Azure security.
Microsoft.OperationalInsights/workspaces: a centralized Azure Log Analytics workspace is used to collect the diagnostics logs and metrics from all the Azure resources:
Azure Kubernetes Service cluster
Application Gateway for Containers
Azure Key Vault
Azure Network Security Group
Azure Container Registry
Azure Storage Account
Azure jump-box virtual machine

Microsoft.Resources/deploymentScripts: a deployment script is used to run the install-alb-controller.sh Bash script that installs the Application Load Balancer (ALB) Controller via Helm along with Cert-Manager. For more information on deployment scripts, see Use deployment scripts in Bicep.
Microsoft.Insights/actionGroups: an Azure Action Group to send email and SMS notifications to system administrators when alerts are triggered.

The Bicep modules provide the flexibility to selectively deploy the following Azure resources based on your requirements:

Microsoft.CognitiveServices/accounts: an Azure OpenAI Service with a GPT-3.5 model used by an AI application like a chatbot. Azure OpenAI Service gives customers advanced language AI with OpenAI GPT-4, GPT-3, Codex, and DALL-E models with Azure's security and enterprise promise. Azure OpenAI co-develops the APIs with OpenAI, ensuring compatibility and a smooth transition from one to the other.
Microsoft.ManagedIdentity/userAssignedIdentities: a user-defined managed identity used by the chatbot application to acquire a security token via Azure AD workload identity to call the Chat Completion API of the ChatGPT model provided by the Azure OpenAI Service.

NOTE: You can find the architecture.vsdx file used for the diagram under the visio folder.

What is Bicep?
\n Bicep is a domain-specific language (DSL) that uses a declarative syntax to deploy Azure resources. It provides concise syntax, reliable type safety, and support for code reuse. Bicep offers the best authoring experience for your infrastructure-as-code solutions in Azure. \n   \n What is Gateway API? \n The Ingress resources Kubernetes objects have evolved into the more comprehensive and powerful Kubernetes Gateway API. Ingress Controller and Gateway API are both Kubernetes objects used for managing traffic routing and load balancing. While Ingress Controller served as entry points for external traffic, they had limitations in terms of flexibility and extensibility. The Kubernetes Gateway API emerged as a solution to address these limitations. Designed to be generic, expressive, extensible, and role-oriented, the Gateway API is a modern set of APIs for defining L4 and L7 routing rules in Kubernetes. \n   \n \n   \n Gateway API offers superior functionality compared to Ingress Controllers as it separates listeners and routes into separate Kubernetes objects,  Gateway  and  HTTPRoute . This separation allows different individuals with distinct roles and permissions to deploy them in separate namespaces. Additionally, Gateway API provides advanced traffic management capabilities including layer 7 HTTP/HTTPS request forwarding based on criteria such as hostname, path, headers, query string, methods, and ports. It also offers SSL termination and TLS policies for secure traffic management. These features grant better control and customization of traffic routing. The design of the Gateway API was driven by the following design goals to address and resolve issues and limitations in ingress controllers: \n   \n \n Role-oriented: The Gateway API comprises API resources that model organizational roles involved in using and configuring Kubernetes service networking. \n Portable: Similar to Ingress, the Gateway API is designed to be a portable specification supported by multiple implementations. \n Expressive: The Gateway API resources support core functionality such as header-based matching, traffic weighting, and other capabilities that were previously only possible through custom annotations in Ingress. \n Extensible: The Gateway API allows for the linking of custom resources at different layers of the API, enabling granular customization within the API structure. \n \n Additional notable capabilities of the Gateway API include: \n \n GatewayClasses: Formalizes types of load-balancing implementations, making it easier for users to understand available capabilities through the Kubernetes resource model. \n Shared Gateways and cross-Namespace support: Allows multiple Route resources to attach to the same Gateway, enabling load balancer and VIP sharing among teams and across Namespaces without direct coordination. \n Typed Routes and typed backends: The Gateway API supports typed Route resources and different types of backends, providing flexibility in supporting various protocols (HTTP, gRPC) and backend targets (Kubernetes Services, storage buckets, functions). \n Experimental Service mesh support with the GAMMA initiative: The Gateway API enables the association of routing resources with Service resources, allowing the configuration of service meshes and ingress controllers. 
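To make the separation between the Gateway and HTTPRoute objects concrete, the following minimal sketch defines the two resources independently, so that a platform team could own the Gateway while an application team owns the route. The resource names, namespaces, and backend service are placeholders; the gateway class shown is the one documented for Application Gateway for Containers, and with that service the Gateway would also carry the ALB-specific annotations or frontend reference required by the deployment strategy you chose.

# Minimal sketch: a Gateway and an HTTPRoute as separate Kubernetes objects (placeholder names)
kubectl apply -f - <<'EOF'
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: example-gateway
  namespace: infra
spec:
  # The gatewayClassName and any ALB annotations depend on your Application Gateway for Containers setup
  gatewayClassName: azure-alb-external
  listeners:
    - name: http
      protocol: HTTP
      port: 80
      allowedRoutes:
        namespaces:
          from: All
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: example-route
  namespace: app
spec:
  parentRefs:
    - name: example-gateway
      namespace: infra
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /
      backendRefs:
        - name: example-service
          port: 8080
EOF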
\n \n When to Choose Ingress Controllers or Gateway API \n Ingress controllers remain suitable for specific use cases: \n \n Ingress controllers are straightforward to set up and are well-suited for smaller and less complex Kubernetes deployments that prioritize easy configuration. \n If you currently have Ingress controllers configured in your Kubernetes cluster and they meet your requirements effectively, there may not be an immediate necessity to transition to the Kubernetes Gateway API. \n \n Gateway API is the recommended option in the following situations: \n \n When dealing with complex routing configurations, traffic splitting, and advanced traffic management strategies, the flexibility provided by Kubernetes Gateway API's Route resources is essential. \n In cases where networking requirements call for custom solutions or the integration of third-party plugins, the Kubernetes Gateway API's CRD-based approach offers enhanced extensibility. \n \n What is Application Gateway for Containers? \n The Application Gateway for Containers is a new, cutting-edge Azure service that offers load balancing and dynamic traffic management for applications running in a Kubernetes cluster. As part of Azure's Application Load Balancing portfolio, this innovative product provides an enhanced experience for developers and administrators. The Application Gateway for Containers represents the evolution of the Application Gateway Ingress Controller (AGIC) and enables Azure Kubernetes Service (AKS) customers to leverage Azure's native Application Gateway load balancer. Azure Application Gateway for Containers enables you to host multiple web applications on the same port, utilizing unique backend services. This allows for efficient multi-site hosting and simplifies the management of your containerized applications. The Application Gateway for Containers fully supports both the Gateway API and Ingress API Kubernetes objects for traffic load balancing. For more information, see: \n   \n \n What is Application Gateway for Containers? \n Application Gateway for Containers components \n Quickstart: Deploy Application Gateway for Containers ALB Controller \n Quickstart: Create Application Gateway for Containers - Bring your own deployment \n Quickstart: Create Application Gateway for Containers managed by ALB Controller \n Advanced load balancing scenarios with the new Azure Application Gateway for Containers \n \n   \n Deployment Strategies \n Azure Application Gateway for Containers supports two main deployment strategies: \n   \n \n Bring your own (BYO) deployment: If you choose this strategy, the Bicep module creates the Application Gateway for Containers resource in the target deployment resource group. In this case, you are responsible for creating the Association and Frontend child resources for the Application Gateway for Containers using the Azure Portal, Bicep, Azure CLI, Terraform, or Azure REST API. Every time you want to create a new Gateway or an Ingress object in your Azure Kubernetes Service (AKS) cluster, it's your responsibility to provision a Frontend child resource for the Application Gateway for Containers upfront and reference it in the annotations of the Gateway or Ingress object. After deleting a Gateway or Ingress object in Kubernetes, you are also responsible for deleting the corresponding Frontend child resource. 
\n Managed by the Application Load Balancer (ALB) Controller: In this deployment strategy, the Application Load Balancer (ALB) Controller, deployed in AKS by the deployment script using a Helm chart, is responsible for the lifecycle of the Application Gateway for Containers resource and its sub-resources. The ALB Controller creates an Application Gateway for Containers resource in the AKS node resource group when an  ApplicationLoadBalancer  Kubernetes object is defined on the cluster. Every time you want to create a new Gateway or an Ingress object which references the  ApplicationLoadBalancer  Kubernetes object in the annotations, the ALB Controller provisions a new Frontend resource and manages its lifecycle based on the lifecycle of the Gateway or Ingress object. \n \n   \n Application Gateway for Containers Components \n The components of Azure Application Gateway for Containers include: \n   \n \n Core Components: Application Gateway for Containers is a parent Azure resource that manages the control plane, which handles the configuration and orchestration of proxies based on customer requirements. It serves as the parent resource for two important child resources: associations and frontends. These child resources are unique to each Application Gateway for Containers and cannot be shared with other instances of Application Gateway for Containers. \n Frontend: An Application Gateway for Containers frontend is a sub-resource of the parent Application Gateway for Containers in Azure. It acts as the entry point for client traffic directed towards a specific Application Gateway for Containers. Each frontend is unique and cannot be associated with multiple Application Gateway for Containers instances. It provides a unique fully qualified domain name (FQDN) that can be assigned to a customer's CNAME record. Currently, private IP addresses are not supported for frontends. Additionally, a single Application Gateway for Containers can support multiple frontends. \n Association: An Application Gateway for Containers association is a connection point into a virtual network and is a child resource of the Application Gateway for Containers. Application Gateway for Containers is designed to allow for multiple associations, but currently only one association is allowed. During the creation of an association, the necessary data plane is provisioned and connected to a subnet within the defined virtual network. Each association should have at least 256 available addresses in the subnet. If n Application Gateway for Containers instances are provisioned and each contains one association, the required number of available addresses is n*256. It is important that all association resources match the same region as the parent Application Gateway for Containers resource. The subnet referenced by the association will contain the proxy components used to handle the ingress traffic to the Azure Kubernetes Service (AKS) cluster. \n Managed identity: A user-defined managed identity with appropriate permissions must be provided for the ALB controller to update the control plane. \n Application Load Balancer (ALB) Controller: The Application Gateway for Containers ALB Controller is a vital Kubernetes deployment that facilitates the seamless configuration and deployment of Application Gateway for Containers. 
By actively monitoring and responding to various Kubernetes Custom Resources and Resource configurations, such as Ingress, Gateway, and ApplicationLoadBalancer, the ALB Controller ensures efficient management of Application Gateway for Containers. Deployed using Helm, the ALB Controller comprises two essential pods. The first is the alb-controller pod, which takes charge of load balancing configuration for Application Gateway for Containers based on customer preferences and intent. The second is the alb-controller-bootstrap pod, responsible for effectively managing Custom Resource Definitions (CRDs) to further optimize the deployment process. \n Managed proxies: These proxies route traffic directly to pods within your Azure Kubernetes Service (AKS) cluster. To ensure direct addressability, the cluster and the proxies must belong to the same virtual network and be configured with Azure CNI. The Application Load Balancer (ALB) control plane creates the proxies inside the subnet referenced by the association. For this reason, the user-defined managed identity used by the Application Load Balancer (ALB) Controller needs to be assigned the Network Contributor role on this subnet; a minimal Azure CLI sketch of this role assignment is shown after the Features and Benefits list below. The subnet needs to have at least a /24 IP address space. \n \n   \n Features and Benefits \n Azure Application Gateway for Containers offers a range of features and benefits, including: \n \n Load Balancing: The service efficiently distributes incoming traffic across multiple containers, ensuring optimal performance and scalability. For more information, see Load balancing features. \n Implementation of Gateway API: Application Gateway for Containers supports the Gateway API, which allows for the definition of routing rules and policies in a Kubernetes-native way. For more information, see Implementation of Gateway API. \n Custom Health Probe: You can define custom health probes to monitor the health of your containers and automatically route traffic away from unhealthy instances. For more information, see Custom health probe for Application Gateway for Containers. \n Session Affinity: The service provides session affinity, allowing you to maintain a consistent user experience by routing subsequent requests from the same client to the same container. For more information, see Application Gateway for Containers session affinity overview. \n TLS Policy: Application Gateway for Containers supports TLS termination, allowing you to offload the SSL/TLS encryption and decryption process to the gateway. For more information, see Application Gateway for Containers TLS policy overview. \n Header Rewrites: Application Gateway for Containers offers the capability to rewrite HTTP headers of client requests and responses from backend targets. Header Rewrites utilize the  IngressExtension  custom resource definition (CRD) of the Application Gateway for Containers. For more details, refer to the documentation on Header Rewrites for Ingress API and Gateway API. \n URL Rewrites: Application Gateway for Containers allows you to modify the URL of a client request, including the hostname and/or path. When Application Gateway for Containers initiates the request to the backend target, it includes the newly rewritten URL. Additional information on URL Rewrites can be found in the documentation for Ingress API and Gateway API. 
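\n \n As mentioned in the Managed proxies description above, the ALB Controller's user-defined managed identity must hold the Network Contributor role on the association subnet. The Bicep module presented later in this article performs this role assignment for you; the following Azure CLI sketch only illustrates what that assignment looks like if you wire it up manually. The identity, virtual network, subnet, and resource group names are hypothetical placeholders. \n \n # Hypothetical names used only for illustration.\n# Look up the principal id of the ALB Controller user-assigned managed identity.\nALB_IDENTITY_PRINCIPAL_ID=$(az identity show \\\n  --name alb-identity \\\n  --resource-group my-rg \\\n  --query principalId \\\n  --output tsv)\n\n# Look up the resource id of the association subnet (at least a /24).\nSUBNET_ID=$(az network vnet subnet show \\\n  --name alb-subnet \\\n  --vnet-name my-vnet \\\n  --resource-group my-rg \\\n  --query id \\\n  --output tsv)\n\n# Grant the identity the Network Contributor role, scoped to the subnet only.\naz role assignment create \\\n  --assignee-object-id $ALB_IDENTITY_PRINCIPAL_ID \\\n  --assignee-principal-type ServicePrincipal \\\n  --role \"Network Contributor\" \\\n  --scope $SUBNET_ID 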
\n \n   \n Advanced Load Balancing \n The Application Gateway for Containers offers an impressive array of traffic management features to enhance your application deployment: \n \n Layer 7 HTTP/HTTPS request forwarding capabilities based on prefix/exact match criteria, including hostname, path, headers, query strings, methods, and ports (80/443). \n Robust support for HTTPS traffic management, including SSL termination and end-to-end SSL encryption. \n Seamless integration with Ingress and Gateway API for streamlined configuration and management. \n Flexible traffic splitting and weighted round-robin functionality to distribute traffic efficiently. \n Mutual Authentication (mTLS) support for establishing secure connections to backend targets. \n Robust health checks to ensure backends are healthy and capable of handling traffic before they are registered. \n Automatic retries to optimize delivery of requests and handle potential failures gracefully. \n TLS Policies that allow for granular control over the encryption protocols and ciphers used for secure communication. \n Autoscaling capabilities to dynamically adjust resources based on workload demands. \n Built-in resilience to handle availability zone failures and ensure continuous operation of your applications. \n \n With these comprehensive features, the Application Gateway for Containers empowers you to efficiently manage and optimize your traffic flow. For more information, see Load balancing features. \n   \n Tutorials and Samples \n You can use the following tutorials to begin your journey with the Application Gateway for Containers: \n   \n \n Gateway API\n \n Backend MTLS \n Multi-site hosting \n Path, header, and query string based routing \n SSL Offloading \n Traffic Splitting / Weighted Round Robin \n URL Rewrite \n \n \n Ingress API\n \n Multi-site hosting \n SSL Offloading \n Troubleshoot \n \n \n \n You can find scripts and YAML manifests for the above tutorials under the  tutorials  folder. \n   \n Deploy the Bicep modules \n You can deploy the Bicep modules in the  bicep  folder using the  deploy.sh  Bash script located in the same folder. Specify a value for the following parameters in the  deploy.sh  script and  main.parameters.json  parameters file before deploying the Bicep modules. \n   \n \n prefix : specifies a prefix for all the Azure resources. \n authenticationType : specifies the type of authentication when accessing the Virtual Machine.  sshPublicKey  is the recommended value. Allowed values:  sshPublicKey  and  password . \n applicationGatewayForContainersType : this parameter specifies the deployment type for the Application Gateway for Containers:\n \n managed : the Application Gateway for Containers resource and its child resources, association and frontends, are created and handled by the Application Load Balancer (ALB) Controller in the node resource group of the AKS cluster. \n byo : the Application Gateway for Containers resource and its child resources are created in the target resource group. You are responsible for the provisioning and deletion of the association and frontend child resources. \n \n \n vmAdminUsername : specifies the name of the administrator account of the virtual machine. \n vmAdminPasswordOrKey : specifies the SSH Key or password for the virtual machine. \n aksClusterSshPublicKey : specifies the SSH Key or password for AKS cluster agent nodes. 
\n aadProfileAdminGroupObjectIDs : when deploying an AKS cluster with Azure AD and Azure RBAC integration, this array parameter contains the list of Azure AD group object IDs that will have the admin role of the cluster. \n keyVaultObjectIds : Specifies the object ID of the service principals to configure in Key Vault access policies. \n windowsAgentPoolEnabled : Specifies whether to create a Windows Server agent pool. \n \n   \n We suggest reading sensitive configuration data such as passwords or SSH keys from a pre-existing Azure Key Vault resource. For more information, see Use Azure Key Vault to pass secure parameter value during Bicep deployment. \n   \n Application Gateway for Containers Bicep Module \n The following table contains the code from the  applicationGatewayForContainers.bicep  Bicep module used to deploy a Application Gateway for Containers. \n \n // Parameters\n@description('Specifies the name of the Application Gateway for Containers.')\nparam name string = 'dummy'\n\n@description('Specifies whether the Application Gateway for Containers is managed or bring your own (BYO).')\n@allowed([\n 'managed'\n 'byo'\n])\nparam type string = 'managed'\n\n@description('Specifies the workspace id of the Log Analytics used to monitor the Application Gateway for Containers.')\nparam workspaceId string\n\n@description('Specifies the location of the Application Gateway for Containers.')\nparam location string\n\n@description('Specifies the name of the existing AKS cluster.')\nparam aksClusterName string\n\n@description('Specifies the name of the AKS cluster node resource group. This needs to be passed as a parameter and cannot be calculated inside this module.')\nparam nodeResourceGroupName string\n\n@description('Specifies the name of the existing virtual network.')\nparam virtualNetworkName string\n\n@description('Specifies the name of the subnet which contains the Application Gateway for Containers.')\nparam subnetName string\n\n@description('Specifies the namespace for the Application Load Balancer Controller of the Application Gateway for Containers.')\nparam namespace string = 'azure-alb-system'\n\n@description('Specifies the name of the service account for the Application Load Balancer Controller of the Application Gateway for Containers.')\nparam serviceAccountName string = 'alb-controller-sa'\n\n@description('Specifies the resource tags for the Application Gateway for Containers.')\nparam tags object\n\n// Variables\nvar diagnosticSettingsName = 'diagnosticSettings'\nvar logCategories = [\n 'TrafficControllerAccessLog'\n]\nvar metricCategories = [\n 'AllMetrics'\n]\nvar logs = [for category in logCategories: {\n category: category\n enabled: true\n}]\nvar metrics = [for category in metricCategories: {\n category: category\n enabled: true\n}]\n\n// Resources\nresource aksCluster 'Microsoft.ContainerService/managedClusters@2024-01-02-preview' existing = {\n name: aksClusterName\n}\n\nresource virtualNetwork 'Microsoft.Network/virtualNetworks@2021-08-01' existing = {\n name: virtualNetworkName\n}\n\nresource subnet 'Microsoft.Network/virtualNetworks/subnets@2021-08-01' existing = {\n parent: virtualNetwork\n name: subnetName\n}\n\nresource readerRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = {\n name: 'acdd72a7-3385-48ef-bd42-f606fba81ae7'\n scope: subscription()\n}\n\nresource networkContributorRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = {\n name: '4d97b98b-1d4f-4787-a291-c67834d212e7'\n scope: subscription()\n}\n\nresource 
appGwForContainersConfigurationManagerRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = {\n name: 'fbc52c3f-28ad-4303-a892-8a056630b8f1'\n scope: subscription()\n}\n\nresource applicationLoadBalancerManagedIdentity 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = {\n name: '${name}ManagedIdentity'\n location: location\n tags: tags\n}\n\n// Assign the Network Contributor role to the Application Load Balancer user-assigned managed identity with the association subnet as as scope\nresource subnetNetworkContributorRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = {\n name: guid(name, applicationLoadBalancerManagedIdentity.name, networkContributorRole.id)\n scope: subnet\n properties: {\n roleDefinitionId: networkContributorRole.id\n principalId: applicationLoadBalancerManagedIdentity.properties.principalId\n principalType: 'ServicePrincipal'\n }\n}\n\n// Assign the AppGw for Containers Configuration Manager role to the Application Load Balancer user-assigned managed identity with the resource group as a scope\nresource appGwForContainersConfigurationManagerRoleAssignmenOnResourceGroup 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (type == 'byo') {\n name: guid(name, applicationLoadBalancerManagedIdentity.name, appGwForContainersConfigurationManagerRole.id)\n scope: resourceGroup()\n properties: {\n roleDefinitionId: appGwForContainersConfigurationManagerRole.id\n principalId: applicationLoadBalancerManagedIdentity.properties.principalId\n principalType: 'ServicePrincipal'\n }\n}\n\n// Assign the AppGw for Containers Configuration Manager role to the Application Load Balancer user-assigned managed identity with the AKS cluster node resource group as a scope\nmodule appGwForContainersConfigurationManagerRoleAssignmenOnnodeResourceGroupName 'resourceGroupRoleAssignment.bicep' = if (type == 'managed') {\n name: guid(nodeResourceGroupName, applicationLoadBalancerManagedIdentity.name, appGwForContainersConfigurationManagerRole.id)\n scope: resourceGroup(nodeResourceGroupName)\n params: {\n principalId: applicationLoadBalancerManagedIdentity.properties.principalId\n roleName: appGwForContainersConfigurationManagerRole.name\n }\n}\n\n// Assign the Reader role the Application Load Balancer user-assigned managed identity with the AKS cluster node resource group as a scope\nmodule nodeResourceGroupReaderRoleAssignment 'resourceGroupRoleAssignment.bicep' = {\n name: guid(nodeResourceGroupName, applicationLoadBalancerManagedIdentity.name, readerRole.id)\n scope: resourceGroup(nodeResourceGroupName)\n params: {\n principalId: applicationLoadBalancerManagedIdentity.properties.principalId\n roleName: readerRole.name\n }\n}\n\n// Create federated identity for the Application Load Balancer user-assigned managed identity\nresource federatedIdentityCredentials 'Microsoft.ManagedIdentity/userAssignedIdentities/federatedIdentityCredentials@2023-01-31' = if (!empty(namespace) && !empty(serviceAccountName)) {\n name: 'azure-alb-identity'\n parent: applicationLoadBalancerManagedIdentity\n properties: {\n issuer: aksCluster.properties.oidcIssuerProfile.issuerURL\n subject: 'system:serviceaccount:${namespace}:${serviceAccountName}'\n audiences: [\n 'api://AzureADTokenExchange'\n ]\n }\n}\n\nresource applicationGatewayForContainers 'Microsoft.ServiceNetworking/trafficControllers@2023-11-01' = if (type == 'byo') {\n name: name\n location: location\n tags: tags\n}\n\nresource applicationGatewayDiagnosticSettings 
'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = if (type == 'byo') {\n name: diagnosticSettingsName\n scope: applicationGatewayForContainers\n properties: {\n workspaceId: workspaceId\n logs: logs\n metrics: metrics\n }\n}\n\n// Outputs\noutput id string = applicationGatewayForContainers.id\noutput name string = applicationGatewayForContainers.name\noutput type string = applicationGatewayForContainers.type\noutput principalId string = applicationLoadBalancerManagedIdentity.properties.principalId\noutput clientId string = applicationLoadBalancerManagedIdentity.properties.clientId \n \n The provided Bicep module performs the following steps: \n   \n \n Accepts several parameters, such as the  name ,  type ,  location ,  tags , and more. \n Defines variables for diagnostic settings, such as  diagnosticSettingsName ,  logCategories ,  metricCategories ,  logs , and  metrics . \n References existing resources like the AKS cluster, virtual network, association subnet, Reader role, Network Contributor role, and AppGw for Containers Configuration Manager role. \n Creates a user-defined managed identity for the Application Load Balancer (ALB) Controller. \n When the  type  parameter is set to  byo , creates an Application Gateway for Containers resource in the target resource group and sets up a diagnostics settings resource to collect logs and metrics from the Application Gateway for Containers in the specified Log Analytics workspace. \n Assigns the Network Contributor role to the Application Load Balancer user-assigned managed identity, scoped to the subnet. \n When the  type  parameter is set to  byo , assigns the AppGw for Containers Configuration Manager role to the Application Load Balancer user-assigned managed identity, scoped to the resource group. This role enables the ALB Controller to access and configure the Application Gateway for Containers resource. \n When the  type  parameter is set to  managed , assigns the AppGw for Containers Configuration Manager role to the Application Load Balancer user-assigned managed identity, scoped to the AKS cluster node resource group. In this case, the Application Gateway for Containers is created and managed by the ALB Controller in the AKS node resource group. \n Assigns the Reader role to the Application Load Balancer user-assigned managed identity, scoped to the AKS cluster node resource group. \n Creates a federated identity credentials resource to establish a federated identity for the Application Load Balancer user-assigned managed identity. This is required by the ALB Controller and uses the name  azure-alb-identity  for the federated credential. \n Creates the  applicationGatewayForContainers  resource using the Microsoft.ServiceNetworking/trafficControllers resource type, based on the provided parameters, when the  type  parameter is set to  byo . \n Creates module outputs:\n \n id ,  name , and  type  of the Application Gateway for Containers. \n principalId  and  clientId  of the ALB Controller user-defined managed identity. \n \n \n \n When the value of the  type  parameter is set to  byo , the Bicep module creates an Application Gateway for Containers resource in the specified target resource group. Alternatively, when the  type  parameter is set to  managed , the ALB Controller installed via Helm in the deployment script handles the creation and management of the Application Gateway for Containers in the AKS node resource group. 
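\n \n As a concrete illustration of the federated identity credential created by the module, the following Azure CLI sketch shows the rough equivalent of the  federatedIdentityCredentials  resource, assuming the module defaults of the  azure-alb-system  namespace and the  alb-controller-sa  service account. The cluster, identity, and resource group names are hypothetical placeholders; when you deploy the Bicep modules you do not need to run these commands, because the module creates the credential for you. \n \n # Hypothetical cluster, identity, and resource group names used only for illustration.\n# Retrieve the OIDC issuer URL of the AKS cluster.\nAKS_OIDC_ISSUER=$(az aks show \\\n  --name my-aks-cluster \\\n  --resource-group my-rg \\\n  --query oidcIssuerProfile.issuerUrl \\\n  --output tsv)\n\n# Create the federated credential that links the user-assigned managed identity\n# to the ALB Controller service account via Azure AD workload identity.\naz identity federated-credential create \\\n  --name azure-alb-identity \\\n  --identity-name alb-identity \\\n  --resource-group my-rg \\\n  --issuer $AKS_OIDC_ISSUER \\\n  --subject \"system:serviceaccount:azure-alb-system:alb-controller-sa\" \\\n  --audiences api://AzureADTokenExchange 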
\n   \n Deployment Script \n The following Deployment Script is used to run the  install-alb-controller-sa.sh  Bash script stofed in a public container of a storage container. This script installs necessary dependencies, retrieves cluster credentials, checks the cluster's type, installs Helm and Helm charts, creates namespaces and service accounts, and deploys the Application Load Balancer Controller. \n \n # Install kubectl\naz aks install-cli --only-show-errors\n\n# Get AKS credentials\naz aks get-credentials \\\n --admin \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --only-show-errors\n\n# Check if the cluster is private or not\nprivate=$(az aks show --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --query apiServerAccessProfile.enablePrivateCluster \\\n --output tsv)\n\n# Install Helm\ncurl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 -o get_helm.sh -s\nchmod 700 get_helm.sh\n./get_helm.sh &>/dev/null\n\n# Add Helm repos\nhelm repo add prometheus-community https://prometheus-community.github.io/helm-charts\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo add jetstack https://charts.jetstack.io\n\n# Update Helm repos\nhelm repo update\n\n# initialize variables\napplicationGatewayForContainersName=''\ndiagnosticSettingName=\"DefaultDiagnosticSettings\"\n\nif [[ $private == 'true' ]]; then\n # Log whether the cluster is public or private\n echo \"$clusterName AKS cluster is private\"\n\n # Install Prometheus\n command=\"helm upgrade prometheus prometheus-community/kube-prometheus-stack \\\n --install \\\n --create-namespace \\\n --namespace prometheus \\\n --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \\\n --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Install NGINX ingress controller using the internal load balancer\n command=\"helm upgrade nginx-ingress ingress-nginx/ingress-nginx \\\n --install \\\n --create-namespace \\\n --namespace ingress-basic \\\n --set controller.replicaCount=3 \\\n --set controller.nodeSelector.\\\"kubernetes\\.io/os\\\"=linux \\\n --set defaultBackend.nodeSelector.\\\"kubernetes\\.io/os\\\"=linux \\\n --set controller.metrics.enabled=true \\\n --set controller.metrics.serviceMonitor.enabled=true \\\n --set controller.metrics.serviceMonitor.additionalLabels.release=\\\"prometheus\\\" \\\n --set controller.service.annotations.\\\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\\\"=/healthz\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Install certificate manager\n command=\"helm upgrade cert-manager jetstack/cert-manager \\\n --install \\\n --create-namespace \\\n --namespace cert-manager \\\n --version v1.14.0 \\\n --set installCRDs=true \\\n --set nodeSelector.\\\"kubernetes\\.io/os\\\"=linux \\\n --set \\\"extraArgs={--feature-gates=ExperimentalGatewayAPISupport=true}\\\"\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Create cluster issuer\n command=\"cat <<EOF | kubectl apply -f -\napiVersion: 
cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-nginx\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: $email\n privateKeySecretRef:\n name: letsencrypt\n solvers:\n - http01:\n ingress:\n class: nginx\n podTemplate:\n spec:\n nodeSelector:\n \"kubernetes.io/os\": linux\nEOF\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n if [[ -n \"$namespace\" && \\\n -n \"$serviceAccountName\" ]]; then\n # Create workload namespace\n command=\"kubectl create namespace $namespace\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Create service account\n command=\"cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n annotations:\n azure.workload.identity/client-id: $workloadManagedIdentityClientId\n azure.workload.identity/tenant-id: $tenantId\n labels:\n azure.workload.identity/use: \"true\"\n name: $serviceAccountName\n namespace: $namespace\nEOF\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n fi\n\n if [[ \"$applicationGatewayForContainersEnabled\" == \"true\" \\\n && -n \"$applicationGatewayForContainersManagedIdentityClientId\" \\\n && -n \"$applicationGatewayForContainersSubnetId\" ]]; then\n \n # Install the Application Load Balancer Controller\n command=\"helm upgrade alb-controller oci://mcr.microsoft.com/application-lb/charts/alb-controller \\\n --install \\\n --create-namespace \\\n --namespace $applicationGatewayForContainersNamespace \\\n --version 1.0.0 \\\n --set albController.podIdentity.clientID=$applicationGatewayForContainersManagedIdentityClientId\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Create workload namespace\n command=\"kubectl create namespace alb-infra\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n if [[ \"$applicationGatewayForContainersType\" == \"managed\" ]]; then\n # Define the ApplicationLoadBalancer resource, specifying the subnet ID the Application Gateway for Containers association resource should deploy into. 
\n # The association establishes connectivity from Application Gateway for Containers to the defined subnet (and connected networks where applicable) to \n # be able to proxy traffic to a defined backend.\n command=\"kubectl apply -f - <<EOF\napiVersion: alb.networking.azure.io/v1\nkind: ApplicationLoadBalancer\nmetadata:\n name: alb\n namespace: alb-infra\nspec:\n associations:\n - $applicationGatewayForContainersSubnetId\nEOF\"\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n if [[ -n $nodeResourceGroupName ]]; then \\\n echo -n \"Retrieving the resource id of the Application Gateway for Containers...\"\n counter=1\n while [ $counter -le 600 ]\n do\n # Retrieve the resource id of the managed Application Gateway for Containers resource\n applicationGatewayForContainersId=$(az resource list \\\n --resource-type \"Microsoft.ServiceNetworking/TrafficControllers\" \\\n --resource-group $nodeResourceGroupName \\\n --query [0].id \\\n --output tsv)\n if [[ -n $applicationGatewayForContainersId ]]; then\n echo \n break \n else\n echo -n '.'\n counter=$((counter + 1))\n sleep 1\n fi\n done\n\n if [[ -n $applicationGatewayForContainersId ]]; then\n applicationGatewayForContainersName=$(basename $applicationGatewayForContainersId)\n echo \"[$applicationGatewayForContainersId] resource id of the [$applicationGatewayForContainersName] Application Gateway for Containers successfully retrieved\"\n else\n echo \"Failed to retrieve the resource id of the Application Gateway for Containers\"\n exit -1\n fi\n\n # Check if the diagnostic setting already exists for the Application Gateway for Containers\n echo \"Checking if the [$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers actually exists...\"\n result=$(az monitor diagnostic-settings show \\\n --name $diagnosticSettingName \\\n --resource $applicationGatewayForContainersId \\\n --query name \\\n --output tsv 2>/dev/null)\n\n if [[ -z $result ]]; then\n echo \"[$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers does not exist\"\n echo \"Creating [$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers...\"\n\n # Create the diagnostic setting for the Application Gateway for Containers\n az monitor diagnostic-settings create \\\n --name $diagnosticSettingName \\\n --resource $applicationGatewayForContainersId \\\n --logs '[{\"categoryGroup\": \"allLogs\", \"enabled\": true}]' \\\n --metrics '[{\"category\": \"AllMetrics\", \"enabled\": true}]' \\\n --workspace $workspaceId \\\n --only-show-errors 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers successfully created\"\n else\n echo \"Failed to create [$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers\"\n exit -1\n fi\n else\n echo \"[$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers already exists\"\n fi\n fi\n fi\n fi\nelse\n # Log whether the cluster is public or private\n echo \"$clusterName AKS cluster is public\"\n\n # Install Prometheus\n echo \"Installing Prometheus...\"\n helm upgrade prometheus prometheus-community/kube-prometheus-stack \\\n --install \\\n --create-namespace \\\n --namespace prometheus \\\n --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \\\n --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false\n\n if [[ $? == 0 ]]; then\n echo \"Prometheus successfully installed\"\n else\n echo \"Failed to install Prometheus\"\n exit -1\n fi\n\n # Install NGINX ingress controller using the internal load balancer\n echo \"Installing NGINX ingress controller...\"\n helm upgrade nginx-ingress ingress-nginx/ingress-nginx \\\n --install \\\n --create-namespace \\\n --namespace ingress-basic \\\n --set controller.replicaCount=3 \\\n --set controller.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set defaultBackend.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set controller.metrics.enabled=true \\\n --set controller.metrics.serviceMonitor.enabled=true \\\n --set controller.metrics.serviceMonitor.additionalLabels.release=\"prometheus\" \\\n --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\n\n if [[ $? == 0 ]]; then\n echo \"NGINX ingress controller successfully installed\"\n else\n echo \"Failed to install NGINX ingress controller\"\n exit -1\n fi\n\n # Install certificate manager\n echo \"Installing certificate manager...\"\n helm upgrade cert-manager jetstack/cert-manager \\\n --install \\\n --create-namespace \\\n --namespace cert-manager \\\n --version v1.14.0 \\\n --set installCRDs=true \\\n --set nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set \"extraArgs={--feature-gates=ExperimentalGatewayAPISupport=true}\"\n\n if [[ $? 
== 0 ]]; then\n echo \"Certificate manager successfully installed\"\n else\n echo \"Failed to install certificate manager\"\n exit -1\n fi\n\n # Create cluster issuer\n echo \"Creating cluster issuer...\"\n cat <<EOF | kubectl apply -f -\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-nginx\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: $email\n privateKeySecretRef:\n name: letsencrypt\n solvers:\n - http01:\n ingress:\n class: nginx\n podTemplate:\n spec:\n nodeSelector:\n \"kubernetes.io/os\": linux\nEOF\n\n if [[ -n \"$namespace\" && \\\n -n \"$serviceAccountName\" ]]; then\n # Create workload namespace\n result=$(kubectl get namespace -o 'jsonpath={.items[?(@.metadata.name==\"'$namespace'\")].metadata.name'})\n\n if [[ -n $result ]]; then\n echo \"$namespace namespace already exists in the cluster\"\n else\n echo \"$namespace namespace does not exist in the cluster\"\n echo \"Creating $namespace namespace in the cluster...\"\n kubectl create namespace $namespace\n fi\n\n # Create service account\n echo \"Creating $serviceAccountName service account...\"\n cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n annotations:\n azure.workload.identity/client-id: $workloadManagedIdentityClientId\n azure.workload.identity/tenant-id: $tenantId\n labels:\n azure.workload.identity/use: \"true\"\n name: $serviceAccountName\n namespace: $namespace\nEOF\n fi\n\n if [[ \"$applicationGatewayForContainersEnabled\" == \"true\" \\\n && -n \"$applicationGatewayForContainersManagedIdentityClientId\" \\\n && -n \"$applicationGatewayForContainersSubnetId\" ]]; then\n \n # Install the Application Load Balancer\n echo \"Installing Application Load Balancer Controller in $applicationGatewayForContainersNamespace namespace using $applicationGatewayForContainersManagedIdentityClientId managed identity...\"\n helm upgrade alb-controller oci://mcr.microsoft.com/application-lb/charts/alb-controller \\\n --install \\\n --create-namespace \\\n --namespace $applicationGatewayForContainersNamespace \\\n --version 1.0.0 \\\n --set albController.namespace=$applicationGatewayForContainersNamespace \\\n --set albController.podIdentity.clientID=$applicationGatewayForContainersManagedIdentityClientId\n \n if [[ $? == 0 ]]; then\n echo \"Application Load Balancer Controller successfully installed\"\n else\n echo \"Failed to install Application Load Balancer Controller\"\n exit -1\n fi\n\n if [[ \"$applicationGatewayForContainersType\" == \"managed\" ]]; then\n # Create alb-infra namespace\n albInfraNamespace='alb-infra'\n result=$(kubectl get namespace -o 'jsonpath={.items[?(@.metadata.name==\"'$albInfraNamespace'\")].metadata.name'})\n\n if [[ -n $result ]]; then\n echo \"$albInfraNamespace namespace already exists in the cluster\"\n else\n echo \"$albInfraNamespace namespace does not exist in the cluster\"\n echo \"Creating $albInfraNamespace namespace in the cluster...\"\n kubectl create namespace $albInfraNamespace\n fi\n\n # Define the ApplicationLoadBalancer resource, specifying the subnet ID the Application Gateway for Containers association resource should deploy into. 
\n # The association establishes connectivity from Application Gateway for Containers to the defined subnet (and connected networks where applicable) to \n # be able to proxy traffic to a defined backend.\n echo \"Creating ApplicationLoadBalancer resource...\"\n kubectl apply -f - <<EOF\napiVersion: alb.networking.azure.io/v1\nkind: ApplicationLoadBalancer\nmetadata:\n name: alb\n namespace: alb-infra\nspec:\n associations:\n - $applicationGatewayForContainersSubnetId\nEOF\n if [[ -n $nodeResourceGroupName ]]; then \\\n echo -n \"Retrieving the resource id of the Application Gateway for Containers...\"\n counter=1\n while [ $counter -le 20 ]\n do\n # Retrieve the resource id of the managed Application Gateway for Containers resource\n applicationGatewayForContainersId=$(az resource list \\\n --resource-type \"Microsoft.ServiceNetworking/TrafficControllers\" \\\n --resource-group $nodeResourceGroupName \\\n --query [0].id \\\n --output tsv)\n if [[ -n $applicationGatewayForContainersId ]]; then\n echo \n break \n else\n echo -n '.'\n counter=$((counter + 1))\n sleep 1\n fi\n done\n\n if [[ -n $applicationGatewayForContainersId ]]; then\n applicationGatewayForContainersName=$(basename $applicationGatewayForContainersId)\n echo \"[$applicationGatewayForContainersId] resource id of the [$applicationGatewayForContainersName] Application Gateway for Containers successfully retrieved\"\n else\n echo \"Failed to retrieve the resource id of the Application Gateway for Containers\"\n exit -1\n fi\n\n # Check if the diagnostic setting already exists for the Application Gateway for Containers\n echo \"Checking if the [$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers actually exists...\"\n result=$(az monitor diagnostic-settings show \\\n --name $diagnosticSettingName \\\n --resource $applicationGatewayForContainersId \\\n --query name \\\n --output tsv 2>/dev/null)\n\n if [[ -z $result ]]; then\n echo \"[$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers does not exist\"\n echo \"Creating [$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers...\"\n\n # Create the diagnostic setting for the Application Gateway for Containers\n az monitor diagnostic-settings create \\\n --name $diagnosticSettingName \\\n --resource $applicationGatewayForContainersId \\\n --logs '[{\"categoryGroup\": \"allLogs\", \"enabled\": true}]' \\\n --metrics '[{\"category\": \"AllMetrics\", \"enabled\": true}]' \\\n --workspace $workspaceId \\\n --only-show-errors 1>/dev/null\n\n if [[ $? 
== 0 ]]; then\n echo \"[$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers successfully created\"\n else\n echo \"Failed to create [$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers\"\n exit -1\n fi\n else\n echo \"[$diagnosticSettingName] diagnostic setting for the [$applicationGatewayForContainersName] Application Gateway for Containers already exists\"\n fi\n fi\n fi\n fi\nfi\n\n# Create output as JSON file\necho '{}' |\n jq --arg x $applicationGatewayForContainersName '.applicationGatewayForContainersName=$x' |\n jq --arg x $namespace '.namespace=$x' |\n jq --arg x $serviceAccountName '.serviceAccountName=$x' |\n jq --arg x 'prometheus' '.prometheus=$x' |\n jq --arg x 'cert-manager' '.certManager=$x' |\n jq --arg x 'ingress-basic' '.nginxIngressController=$x' >$AZ_SCRIPTS_OUTPUT_PATH \n \n The script performs the following steps: \n   \n \n Installs  kubectl  using the Azure CLI command  az aks install-cli . \n Retrieves the AKS cluster credentials using the Azure CLI command  az aks get-credentials . \n Checks whether the AKS cluster is private or public by querying the  enablePrivateCluster  attribute of the cluster's API server access profile using the Azure CLI command  az aks show . \n Installs  Helm  by downloading and executing the  get_helm.sh  script. \n Adds Helm repositories using the  helm repo add  command for the Kube Prometheus Stack and Cert-Manager. \n Updates the Helm repositories using the  helm repo update  command. \n Initializes variables related to the Application Gateway for Containers. \n The script performs the subsequent steps differently depending on whether the cluster is public or private. The script uses the az aks command invoke to execute commands when the cluster is private. \n Installs Prometheus using the  helm upgrade --install  command. \n Installs the certificate manager using the  helm upgrade --install  command. \n If a  namespace  and  serviceAccountName  are provided, it creates the namespace and service account using  kubectl . This information is optional, and it can be used to create the namespace and service account for a workload. \n If the Application Gateway for Containers is enabled and necessary information is provided, it installs the Application Load Balancer Controller using the  helm upgrade --install  command. The YAML manifest specifies the client id of the ALB Controller managed identity from the  applicationGatewayForContainersManagedIdentityClientId  environment variable and the target namespace from the  applicationGatewayForContainersNamespace  environment variable. For more information on the installation of the ALB Controller via Helm, see Quickstart: Deploy Application Gateway for Containers ALB Controller. \n When the  applicationGatewayForContainersType  environment variable is set to  managed , creates the  alb-infra  namespace using  kubectl  and deploys the  ApplicationLoadBalancer  resource in the newly created namespace. The YAML manifest specifies the resource id of the subnet used by the association from the  applicationGatewayForContainersSubnetId  environment variable. \n Retrieves the resource ID of the Application Gateway for Containers and checks if the diagnostic settings exist. If not, it creates the diagnostic settings using  az monitor diagnostic-settings create . 
\n Creates an output JSON file containing the Application Gateway for Containers  name , workload  namespace  and  service account name , if any, and the namespace for Prometheus and the Certificate Manager. \n \n   \n Review deployed resources \n You can use the Azure portal to list the deployed resources in the resource group. If you chose to deploy an Application Gateway for Containers managed by the ALB Controller, you will find the resource under the node resource group of the AKS cluster. \n   \n \n   \n You can also use Azure CLI to list the deployed resources in the resource group and AKS node resource group: \n \n az resource list --resource-group <resource-group-name>\nnodeResourceGroupName=$(az aks show --name <aks-name> --resource-group <resource-group-name> --query \"nodeResourceGroup\" -o tsv)\naz resource list --resource-group $nodeResourceGroupName \n \n You can also use the following PowerShell cmdlets to list the deployed resources in the resource group and AKS node resource group: \n \n Get-AzResource -ResourceGroupName <resource-group-name>\n$NodeResourceGroup = (Get-AzAksCluster -Name <aks-name> -ResourceGroupName <resource-group-name>).NodeResourceGroup\nGet-AzResource -ResourceGroupName $NodeResourceGroup \n \n Deploy Sample \n After confirming the successful deployment, you can easily deploy your workloads and configure them to have a public endpoint through the newly created Application Gateway for Containers. To achieve this, you have two options: either a Gateway or an Ingress in Kubernetes. These options allow you to expose your application to the public internet using the Application Gateway for Containers resource. The documentation provides various tutorials for both the Gateway API and the Ingress API, including: \n   \n \n Gateway API:\n \n Backend MTLS \n Multi-site hosting \n Path, header, and query string-based routing \n SSL Offloading \n Traffic Splitting / Weighted Round Robin \n URL Rewrite \n \n \n Ingress API:\n \n Multi-site hosting \n SSL Offloading \n Troubleshoot \n \n \n \n   \n You can find the scripts and YAML manifests for these tutorials in the  tutorials  folder. Additionally, the  app  folder contains two samples:  byo  for a bring-your-own installation of the Application Gateway for Containers, and  managed  which works with an Application Gateway for Containers managed by the ALB Controller. \n For simplicity, let's focus on the  managed  sample, while leaving the  byo  sample for the reader to review. Let's start by reviewing the YAML manifests. \n   \n Deployment \n The  deployment.yaml  file contains the YAML definition for the deployment, the service, and a secret that contains a temporary certificate for the Gateway listener that will be replaced by the certificate issued by Let's Encrypt via the certificate manager. For more information on how to use the certificate manager to issue a new certificate to a Gateway using HTTP01 challenges, see Configuring the HTTP-01 Gateway API solver. 
\n \n apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: httpbin\nspec:\n replicas: 3\n selector:\n matchLabels:\n app: httpbin\n template:\n metadata:\n labels:\n app: httpbin\n spec:\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: httpbin\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: httpbin\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - image: docker.io/kennethreitz/httpbin\n imagePullPolicy: IfNotPresent\n name: httpbin\n resources:\n requests:\n memory: \"64Mi\"\n cpu: \"125m\"\n limits:\n memory: \"128Mi\"\n cpu: \"250m\"\n ports:\n - containerPort: 80\n env:\n - name: PORT\n value: \"80\"\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: httpbin\nspec:\n ports:\n - port: 80\n targetPort: 80\n protocol: TCP\n type: ClusterIP\n selector:\n app: httpbin\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: listener-tls-secret\ndata:\n tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLakNDQWhJQ0FRRXdEUVlKS29aSWh2Y05BUUVMQlFBd1d6RUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlYKQkFnTUNsTnZiV1V0VTNSaGRHVXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREVVTUJJR0ExVUVBd3dMWlhoaGJYQnNaUzVqYjIwd0hoY05Nakl4TVRFMk1EVXhPREV6V2hjTk1qVXdPREV5Ck1EVXhPREV6V2pCYk1Rc3dDUVlEVlFRR0V3SkJWVEVUTUJFR0ExVUVDQXdLVTI5dFpTMVRkR0YwWlRFaE1COEcKQTFVRUNnd1lTVzUwWlhKdVpYUWdWMmxrWjJsMGN5QlFkSGtnVEhSa01SUXdFZ1lEVlFRRERBdGxlR0Z0Y0d4bApMbU52YlRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTm4vcHNCYjh0RVVlV3lLCkd4UHZmaFdSaHl6Qm9veFQraTBGUzBXYlNJdEtGUFl3ZmhKay9TYmgzZ09mL1NzVVE0MU1kVkJDb25meEF1OHAKdkhrdlk5cjIrRlEwcXBqb3RuNVJadm1QVlhnTVU0MHZhVzdJSkVzUEIyTTk4UDlrL2VkZXhFOUNEbVhRRUgySApYYXFoaFVpRnh1Q0NIeThLWHJOb0JMVGZ1VWRsM2lycTFJMFAxSkVJaXQ2WC9DeVFWQmU3SVI5ZGZlVXc5UFlsClRKVVhBRGdRTzBCVGRYb3RRc1VUZjI1dktFRWcyUjVHQXIwVC9FcThjS3BNcWFiYzhydCtZTjlQYTVLcUFyWS8KR2M0UkdpTVNBSWlTclhtMHFYQzU2cjhEVFk0T2VhV292ZW9TcXp1Ymxzc0lZNHd4alF4OUdBSC9GTWpxU0ltTgozREQ0RElFQ0F3RUFBVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBSTBuMTc5VU8xQVFiRmdqMGEvdHBpTDBLCkFPS0U4UTlvSzBleS80VlYzREdQM3duR2FOMW52d2xCVFNKWGFVK1JHejZQZTUxN2RoRklGR3BYblEzemxZV1UKVE0zU0V1NXd4eWpVZUtWSVlvOGQ3dTN2UXdDMnhHK1IrbStSZ0Jxcm5ib003cVhwYjR0dkNRRi82TXl6TzZDNwpNM0RKZmNqdWQxSEszcmlXQy9CYlB3ZjBlN1dtWW95eGZoaTZBUWRZNmZJU3RRZVhVbWJ1aWtPTDE1VjdETEFtCkxHOSt5cExOdHFsa2VXTXBVcU45R0d6ZjdpSTNVMlJKWTlpUjdrcHUzMXdDWGY4VUhPcUxva2prU1JTTTV0dzcKWXRDNHdjN2dNS1FmSi9GaS9JVXRKdmx6djk1V0lGSU4rSURtbHBPdFVZQTBwMmVFeERtRFFJc2xZV1YwMVE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==\n tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRRFovNmJBVy9MUkZIbHMKaWhzVDczNFZrWWNzd2FLTVUvb3RCVXRGbTBpTFNoVDJNSDRTWlAwbTRkNERuLzByRkVPTlRIVlFRcUozOFFMdgpLYng1TDJQYTl2aFVOS3FZNkxaK1VXYjVqMVY0REZPTkwybHV5Q1JMRHdkalBmRC9aUDNuWHNSUFFnNWwwQkI5CmgxMnFvWVZJaGNiZ2doOHZDbDZ6YUFTMDM3bEhaZDRxNnRTTkQ5U1JDSXJlbC93c2tGUVh1eUVmWFgzbE1QVDIKSlV5VkZ3QTRFRHRBVTNWNkxVTEZFMzl1YnloQklOa2VSZ0s5RS94S3ZIQ3FUS21tM1BLN2ZtRGZUMnVTcWdLMgpQeG5PRVJvakVnQ0lrcTE1dEtsd3VlcS9BMDJPRG5tbHFMM3FFcXM3bTViTENHT01NWTBNZlJnQi94VEk2a2lKCmpkd3crQXlCQWdNQkFBRUNnZ0VCQUozalpYaW9uK01DZXpjN2g0VVd6akQ4NS9Sb2dqdzBqbHVSSEFWY0JGeXQKMlNTOTFuR29KeG5FT1RKUzYrQUpteXQ1bHZYOGJRT0YwV1E2ekVEUksvZHBMRTZBbnBhRTViZnphU3VTdm9wbQpFeFdNbzBZVE93WUo2b1hjVlBJRXlVaU1BSTZPL3pLS1VZYzVSWVBSM0dDOFUyQkRuaVpKMG5FS0EyNmxJdUlyCjlVcWtkSk9wRzJtK09iTnc5a0paZVRJblN2TkJKQ0NXQlRwcmY3TS9IRUprbE5aQU5mV0F0YXptUFp3QXI2cFIKOEpHbzV1ZUl2NXI3S1FJbkpldEF3YStpQ3VTUHZvUlZNOUdrSmZxSHVtVmNJbjU5Z0ZzcXR6dzVGNUlocWQ5eQo3dHNxUTdxNUYxb1BLeGxPOXl4TVQxaUlnWmRNaDZqODFuM1kwaWFlN2lrQ2dZRUE4UG9tVmQxSXh4c3ZYbmRIClM5MkVQUENkQmYybzM2SmczNEJYc3QwV3BaN1ZNWFhvVjFPeFhGeWpsNm1vNVBjMTRUSXpjd2NVdWJJMGVhbWEKVWxVbnR1bDFPMkdhYlh4eDJrR1l6ZmVvalZBVUh5OGNjeWxoTkpXRDl5Ykx0TCttNTBBTFI3V1JSdG5LSUxaSApJc3NjTGRTcGYyMUNUYWU3REk3Q2NNQ3RSbmNDZ1lFQTU1YkhTRFBaNmFUZWp4cDNwdHBDNitrN1duMVdlYnBmCkdDL1Rlb0pIaHVteDB6K3lPNitPcng0YlRZSFhjcU1Fa2pwRWxqN0xwb3ZxMktjYUN6SUxvVHdQTWVjNncxSVQKZTRld01JM3Nid2FKMFFhZXgvWHdVR1J0R3RuNkVka25qK2VaWSsxYUpscEJBcjlZZ0VKaTFUci9wZW9VdEtJUwpYSGNsbzY3dmFzY0NnWUJwQ2pFaHBuWnR5OHpIR2FrclNhQzF5NUEycDA0d1JTQ0M2L2ZPVUk3cG5LV0RqTWk5CklBOGttb0Q0d0F5TjJiQlR2RVV1ODd3MkFaYmNIWERXU0tZcUZmTnk4ZVdWcWZRYTFoTWNYTUxNN2tZSEhjc0IKNjl5aVJqWWl5bmRyRDB0YWE5RSs3Y2Nvb2hCNFY5d0VMNUxWNjJnQzBvWmZRU2pJbllYbURpYTVtd0tCZ0ZwbworWm1OYklnVExqT3R3SUpwK1BCQ1dFS0daZWtWd2lRZUg3QlhCZmQ4YWtpdk9EU20zOHdyczdyNWNwTzFZb1ozCnF1a0EwTjVQQnpyWFdZcC9XaHp5NW5lejdyUHI2ZUV5NHF6QjYwaVl3OXJQZTlOU2h5UExZUEMzb2pHdmxndE8KL2dvTjBrRGd3VHFDV3RtUGtTZnZaWGh2UHZBWnlaTkJqSGN2UnhabkFvR0JBS2hnZnlUNTVUVTUxR3hJRks2YwpqNkM5cEdveHJ5Qk0wSzVTb3FqWk5ud2J5UEwzL2Yybmcwb2tSek5iNEorTVJrOVk1RXlIZkw5WlNTdUNKMHdnCkNOMlRZSnZZQWRETWJiOThZSXB3cTdqdkp4VG15cHFEK2lxM1BBVU9RQ3hrVy9FMnVyOXZMbmZlcFcvVFVaVEMKOWdnOFFQL3Y2Q1owamRpeVBYZEJpb1ZOCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K\ntype: kubernetes.io/tls \n \n Gateway \n The  gateway.yaml  contains the definition of the Gateway used by the application. When using an Application Gateway for Containers managed by the ALB Controller, the frontend is auotmatically created for your by the ALB Controller. \n \n apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n name: httpbin-gateway\n namespace: gateway-demo\n annotations:\n cert-manager.io/issuer: letsencrypt\n alb.networking.azure.io/alb-name: alb\n alb.networking.azure.io/alb-namespace: alb-infra\nspec:\n gatewayClassName: azure-alb-external\n listeners:\n - name: http\n protocol: HTTP\n port: 80\n allowedRoutes:\n namespaces:\n from: All\n - hostname: dummy.babosbird.com\n name: https\n port: 443\n protocol: HTTPS\n allowedRoutes:\n namespaces:\n from: All\n tls:\n mode: Terminate\n certificateRefs:\n - name: listener-tls-secret \n \n Issuer \n The Gateway defines a certificate issuer in the  cert-manager.io/issuer  annotation, so we need to create an issuer. In the issuer, we define the CA root, Let's Encrypt in this case, for the certificate chain to issue our certificate and the challenge type that our client would like to handle to prove our controll over the domain (in our case we will use HTTP01 challenge). 
\n \n apiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n name: letsencrypt\n namespace: gateway-demo\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: 'admin@contoso.com'\n privateKeySecretRef:\n name: letsencrypt\n solvers:\n - http01:\n gatewayHTTPRoute:\n parentRefs:\n - name: httpbin-gateway\n namespace: gateway-demo\n kind: Gateway \n \n The certificate manager follows a series of steps to issue a certificate to the Gateway listener: \n   \n \n The gateway creates a Certificate object with a nil revision flag, initially pointing to a self-signed TLS secret. However, since the CA of the TLS secret is not valid, the Certificate realizes this and proceeds to the next step. \n A CertificateRequest (CR) object is created with the revision flag set to 1, indicating the need to re-issue a valid certificate. The CR contains all the necessary information to send a CSR request in PKCS #10 format to the CA. \n The CR object creates an Order object to monitor the request process. \n The Cluster Issuer registers itself in the ACME server (CA) using our public key, which is included in the CR. The CA server generates a unique token/key for our request and associates them with our public key, enabling the verification of our signature for future requests from our client. \n The CA server returns the unique token and key for each supported challenge to our client (Cluster Issuer), which were stored for our public key on the CA server. \n The cluster issuer updates the Order object with the server-supported challenges, along with their unique token and key. \n Based on the supported challenges and our Issuer configuration, the Order object determines to solve the HTTP01 Challenge, utilizing the parameters provided by the ACME server. \n A new pod is created in the default namespace to run the Challenge. This pod contains an HTTP server that serves on a specific path and expects the origin to be our domain name. If the origin does not match, a 404 error is returned. \n \n   \n HTTPRoute \n The  httproute.yaml  contains the definition of the  HTTPRoute  object used to route requests to the service: \n \n apiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n name: httpbin-route\n namespace: gateway-demo\nspec:\n parentRefs:\n - name: httpbin-gateway\n namespace: gateway-demo\n kind: Gateway\n rules:\n - backendRefs:\n - name: httpbin\n port: 80 \n \n Scripts \n You are now ready to deploy the  managed  sample to your AKS cluster. As a first step, enter a correct value for each variable in the  00.variables.sh  file: \n   \n   \n # Certificate Manager\ncmNamespace=\"cert-manager\"\ncmRepoName=\"jetstack\"\ncmRepoUrl=\"https://charts.jetstack.io\"\ncmChartName=\"cert-manager\"\ncmReleaseName=\"cert-manager\"\ncmVersion=\"v1.14.0\"\n\n# Application Load Balancer \napplicationLoadBalancerName=\"alb\"\napplicationLoadBalancerNamespace=\"alb-infra\"\n\n# Demo\nnamespace=\"agc-demo\"\ngatewayName=\"echo-gateway\"\nissuerName=\"letsencrypt\"\nhttpRouteName=\"echo-route\"\n\n# Ingress and DNS\ndnsZoneName=\"babosbird.com\"\ndnsZoneResourceGroupName=\"DnsResourceGroup\"\nsubdomain=\"shogunagc\"\nhostname=\"$subdomain.$dnsZoneName\" \n   \n   \n Run the  01-install-cert-manager.sh  script if you need to install the certificate manager in your AKS cluster. 
Run the 01-install-cert-manager.sh script if you need to install the certificate manager in your AKS cluster.

#!/bin/bash

# Variables
source ./00-variables.sh

# Check whether the cert-manager Helm repository has already been added
result=$(helm repo list | grep $cmRepoName | awk '{print $1}')

if [[ -n $result ]]; then
 echo "[$cmRepoName] Helm repo already exists"
else
 # Add the Jetstack Helm repository
 echo "Adding [$cmRepoName] Helm repo..."
 helm repo add $cmRepoName $cmRepoUrl
fi

# Update your local Helm chart repository cache
echo 'Updating Helm repos...'
helm repo update

# Install or upgrade the cert-manager Helm chart
result=$(helm list -n $cmNamespace | grep $cmReleaseName | awk '{print $1}')

if [[ -n $result ]]; then
 echo "[$cmReleaseName] cert-manager already exists in the $cmNamespace namespace"
 echo "Upgrading [$cmReleaseName] cert-manager in the $cmNamespace namespace..."
else
 echo "Deploying [$cmReleaseName] cert-manager to the $cmNamespace namespace..."
fi

helm upgrade $cmReleaseName $cmRepoName/$cmChartName \
 --install \
 --create-namespace \
 --namespace $cmNamespace \
 --version $cmVersion \
 --set installCRDs=true \
 --set nodeSelector."kubernetes\.io/os"=linux \
 --set "extraArgs={--feature-gates=ExperimentalGatewayAPISupport=true}"

Then run the 02-create-sample.sh script to deploy the application to the specified namespace. The script makes use of the yq tool.

#!/bin/bash

# Variables
source ./00-variables.sh

# Check whether the namespace exists in the cluster
result=$(kubectl get namespace -o jsonpath="{.items[?(@.metadata.name=='$namespace')].metadata.name}")

if [[ -n $result ]]; then
 echo "$namespace namespace already exists in the cluster"
else
 echo "$namespace namespace does not exist in the cluster"
 echo "creating $namespace namespace in the cluster..."
 kubectl create namespace $namespace
fi

# Create a sample web application
kubectl apply -n $namespace -f ./deployment.yaml

# Create the Gateway
cat gateway.yaml |
 yq "(.metadata.name)|="\""$gatewayName"\" |
 yq "(.metadata.namespace)|="\""$namespace"\" |
 yq "(.metadata.annotations."\""cert-manager.io/issuer"\"")|="\""$issuerName"\" |
 yq "(.metadata.annotations."\""alb.networking.azure.io/alb-name"\"")|="\""$applicationLoadBalancerName"\" |
 yq "(.metadata.annotations."\""alb.networking.azure.io/alb-namespace"\"")|="\""$applicationLoadBalancerNamespace"\" |
 yq "(.spec.listeners[1].hostname)|="\""$hostname"\" |
kubectl apply -f -

# Create the Issuer
cat issuer.yaml |
 yq "(.metadata.name)|="\""$issuerName"\" |
 yq "(.metadata.namespace)|="\""$namespace"\" |
 yq "(.spec.acme.solvers[0].http01.gatewayHTTPRoute.parentRefs[0].name)|="\""$gatewayName"\" |
 yq "(.spec.acme.solvers[0].http01.gatewayHTTPRoute.parentRefs[0].namespace)|="\""$namespace"\" |
kubectl apply -f -

# Create the HTTPRoute
cat httproute.yaml |
 yq "(.metadata.name)|="\""$httpRouteName"\" |
 yq "(.metadata.namespace)|="\""$namespace"\" |
 yq "(.spec.parentRefs[0].name)|="\""$gatewayName"\" |
 yq "(.spec.parentRefs[0].namespace)|="\""$namespace"\" |
kubectl apply -f -
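Once the sample has been applied, cert-manager starts the HTTP01 challenge described earlier. The commands below are an optional way to follow the issuance; they are not part of the sample scripts and simply reuse the names from 00-variables.sh. The certificate is ready once the Certificate object reports Ready=True and the listener TLS secret has been updated with the Let's Encrypt certificate.

#!/bin/bash

# Variables
source ./00-variables.sh

# Follow the cert-manager objects created for the Gateway listener
kubectl get certificates,certificaterequests,orders.acme.cert-manager.io,challenges.acme.cert-manager.io -n $namespace

# Show the details and events of the issued certificate
kubectl describe certificate -n $namespace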
If you delegated the management of your public DNS zone to Azure DNS, you can use the 03-configure-dns.sh script to create a CNAME for the FQDN assigned to the frontend used by the Gateway.

#!/bin/bash

# Variables
source ./00-variables.sh

# Get the FQDN of the gateway
echo -n "Retrieving the FQDN of the [$gatewayName] gateway..."
while true
do
 fqdn=$(kubectl get gateway $gatewayName -n $namespace -o jsonpath='{.status.addresses[0].value}')
 if [[ -n $fqdn ]]; then
 echo
 break
 else
 echo -n '.'
 sleep 1
 fi
done

if [[ -n $fqdn ]]; then
 echo "[$fqdn] FQDN successfully retrieved from the [$gatewayName] gateway"
else
 echo "Failed to retrieve the FQDN from the [$gatewayName] gateway"
 exit
fi

# Check if a CNAME record for the subdomain exists in the DNS zone
echo "Retrieving the CNAME for the [$subdomain] subdomain from the [$dnsZoneName] DNS zone..."
cname=$(az network dns record-set cname list \
 --zone-name $dnsZoneName \
 --resource-group $dnsZoneResourceGroupName \
 --query "[?name=='$subdomain'].CNAMERecord.cname" \
 --output tsv \
 --only-show-errors)

if [[ -n $cname ]]; then
 echo "A CNAME already exists in the [$dnsZoneName] DNS zone for the [$subdomain] subdomain"

 if [[ $cname == $fqdn ]]; then
 echo "The [$cname] CNAME equals the FQDN of the [$gatewayName] gateway. No additional step is required."
 exit
 else
 echo "The [$cname] CNAME is different than the [$fqdn] FQDN of the [$gatewayName] gateway"
 fi

 # Delete the CNAME record
 echo "Deleting the [$subdomain] CNAME from the [$dnsZoneName] zone..."

 az network dns record-set cname delete \
 --name $subdomain \
 --zone-name $dnsZoneName \
 --resource-group $dnsZoneResourceGroupName \
 --only-show-errors \
 --yes

 if [[ $? == 0 ]]; then
 echo "[$subdomain] CNAME successfully deleted from the [$dnsZoneName] zone"
 else
 echo "Failed to delete the [$subdomain] CNAME from the [$dnsZoneName] zone"
 exit
 fi
else
 echo "No CNAME exists in the [$dnsZoneName] DNS zone for the [$subdomain] subdomain"
fi

# Create a CNAME record
echo "Creating a CNAME in the [$dnsZoneName] DNS zone for the [$fqdn] FQDN of the [$gatewayName] gateway..."
az network dns record-set cname set-record \
 --cname $fqdn \
 --zone-name $dnsZoneName \
 --resource-group $dnsZoneResourceGroupName \
 --record-set-name $subdomain \
 --only-show-errors 1>/dev/null

if [[ $? == 0 ]]; then
 echo "[$subdomain] CNAME successfully created in the [$dnsZoneName] DNS zone for the [$fqdn] FQDN of the [$gatewayName] gateway"
else
 echo "Failed to create a CNAME in the [$dnsZoneName] DNS zone for the [$fqdn] FQDN of the [$gatewayName] gateway"
fi

Finally, you can test the sample by running the 04-test-application.sh script.

#!/bin/bash

# Variables
source ./00-variables.sh

# Curling this FQDN should return responses from the backend as configured in the HTTPRoute
curl https://$hostname

You can also open the application using a web browser.

Clean up resources

When you no longer need the resources you created, you can delete the resource group with the following Azure CLI command. This removes all the Azure resources.

az group delete --name <resource-group-name>
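If you prefer not to be prompted for confirmation, or you want the command to return immediately while the deletion continues in the background, az group delete also accepts the --yes and --no-wait flags:

az group delete --name <resource-group-name> --yes --no-wait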
Alternatively, you can use the following PowerShell cmdlet to delete the resource group and all the Azure resources.

Remove-AzResourceGroup -Name <resource-group-name>
Relational Data Synchronization between environments

There are business and/or technical cases where relational data should be duplicated to another environment. Since the demands of those business and/or technical cases are not the same, there are multiple technical solutions to achieve the goal.

In this article, I will discuss the various solutions according to different business needs, with a deep dive into one family of solutions – sync solutions based on the database engine (DB engine). The content is Azure oriented, but the same concepts hold for other clouds as well. Anyone who needs to sync relational data between environments should find a good guideline here.

General synchronization demands

Let us start with the typical demands:

Scenario | Latency | Typical solution family
Data Warehouse | Hours to a day | ETL
Data mart | Minutes to hours | DB engine sync
Highly utilized DB | Seconds to minutes | DB engine full or sync
High availability | Seconds | DB engine full
Disaster Recovery | Seconds to minutes | DB engine full
Network separation | Varies | Varies

DB engine sync is the focus of this article; see below.

Here is a high-level description of those solution families:

ETL (Extract, Transform, Load):
- Used for populating data warehouses or data marts from production systems
- Usually, the schema on the target is more reporting friendly (star schema) than the production system
- The data in the target can lag behind the source (usually by hours)
- The source and the target can use different technologies
- Tools in the market: Azure Data Factory, Informatica, Ascend

DB engine full:
- Built-in replica mechanism to keep another copy of the full database
- With or without the ability to have one or more replicas that can be used as read replicas
- Based on high availability, log shipping, backup & restore, or storage-based solutions
- Used for HA/DR and/or read scale-out
- Minimal latency (seconds)
- Same technology
- Read-only on the target

DB engine sync:
- Tools in scope: SQL Data Sync, Fabric Mirroring, Replication
- These tools support a partial copy of the database
- See more in the next chapter

Each option has its own pros and cons, and sometimes you might use more than one solution in the same project. In the rest of this article, I will focus on the DB engine sync solutions family.

More information:
- ETL - Extract, transform, and load
- Read only Replica: Azure SQL, PostgreSQL, MySQL
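To make the "DB engine full" family more concrete, here is a minimal sketch of creating a readable secondary (geo-replica) of an Azure SQL database with the Azure CLI. The resource group, server, and database names are placeholders, and the command is only an illustration of the read-replica approach mentioned above, not part of the original article.

# Create a readable secondary (geo-replica) of an Azure SQL database on another logical server
az sql db replica create \
 --resource-group <primary-resource-group> \
 --server <primary-server-name> \
 --name <database-name> \
 --partner-server <secondary-server-name> \
 --partner-resource-group <secondary-resource-group>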
DB engine Sync Solutions Family

The need:

I cannot overstate the importance of choosing a synchronization solution based on your specific business needs. This is the reason multiple solutions exist – so that your specific need can be covered by a good-enough solution.

A sync process is responsible for synchronizing data between environments – to be more exact, between a source and one or more targets. The different solutions have different characteristics.

Here are typical characteristics that you might be interested in:
- Various kinds of technology
- Different schema
- Updates on both sides (conflicts might happen)
- Latency between the two copies
- Maintenance efforts and skills required
- The level of provider/user responsibility for the sync, including re-sync probability, tools, and effort

I chose three key technologies (Replication, SQL Data Sync, Fabric Mirroring) to discuss. The discussion is based on many conversations with my customers.

Replication:
- Very mature technology, supported by the majority of relational database products
- Low latency – usually seconds
- Multiple flavors – transactional, merge, snapshot
- Different table structures in the source and target are possible with limitations, but add complexity
- Multiple subscribers per source are supported
- Monitoring is your responsibility, and in case of failure deep knowledge is needed to avoid reinitializing
  - For SQL Server, you have a built-in Replication Monitor tool. For other databases you should check.
  - The monitor does not take corrective actions. Failing to track the replication status might leave the target environment out of date.
- Replicating the data to a database of another provider might be possible, usually with limitations. You will need a third-party tool to implement such a solution. For SQL Server, heterogeneous database replication is deprecated.
- Azure SQL Database cannot be a publisher
- You must have a good DBA with specific replication knowledge to maintain the system

Typical scenarios for replication:
- Filtering (only part of the rows and/or columns should be replicated)
- Low latency needs
- Crossing security boundaries with SQL authentication (see the security section)
- Cross-database technologies (SQL Server → Oracle)

More information:
- Replication: Azure SQL MI, Azure SQL DB, PostgreSQL, MySQL

SQL Data Sync for Azure:
- SQL Data Sync is a service built on Azure SQL Database that lets you synchronize the data you select bi-directionally across multiple databases, both on-premises and in the cloud, but only SQL Server based.
- Azure SQL Data Sync does not support Azure SQL Managed Instance or Azure Synapse Analytics at this time
- The source and target must have exactly the same schema
- Multiple subscribers are supported

Typical scenarios for SQL Data Sync:
- A considerable number of tables to be replicated
- Managed by Azure experts (limited database knowledge needed)
- SaaS solution preferred
- Azure SQL Database source
- Bi-directional synchronization

More information:
- Data Sync: Overview, Best Practices
- Azure SQL Data Sync | Tips and Tricks

Mirroring in Microsoft Fabric (private preview):
- The target for the synced data is stored in delta lake table format – no relational database is needed on the target
- The primary business scenario is reporting on the target
- The schema cannot be changed on the target
- Azure Cosmos DB, Azure SQL DB, and Snowflake customers will be able to use Mirroring to mirror their data in OneLake and unlock all the capabilities of Fabric Warehouse, Direct Lake Mode, Notebooks, and much more.
- SQL Server, Azure PostgreSQL, Azure MySQL, MongoDB, and other databases and data warehouses will be coming in CY24.

Typical scenarios for Mirroring with Microsoft Fabric:
- The target is reporting only and might integrate data from multiple sources
- The cost associated with maintaining another relational engine for reporting is high. This aspect is even more significant for ISVs that manage a different environment for each customer (tenant).
- Azure SQL or IaaS environment
- Replacing an ETL system with a no-code solution
- Part of your OneLake data architecture

More information:
- Mirroring: Announcement, Copilot, Cosmos DB

Other aspects:

For the completeness of this article, here is a brief discussion of other aspects of the solutions that you should be aware of.

Identity and Security:
- In all solutions, the integrated option is best (replication authentication and replication security, SQL Data Sync, Mirroring).
- For replication, you might use SQL authentication. For Azure SQL Managed Instance it is required.

Cost:
None of the solutions has a direct cost, apart from the services used for the source and target and any cross-datacenter network bandwidth consumed.

Bi-directional synchronization and conflict resolution:
The only Azure-native solution with built-in support is SQL Data Sync.
- Transactional replication – bi-directional (peer-to-peer) is rare but has multiple options. Last write wins is the automatic way, as defined here. Note:
  - Peer-to-peer is not supported by the Azure SQL Database offerings
  - Merge replication has more options, but not on the Azure SQL Database offerings – see here
- SQL Data Sync – Hub wins or Member wins (see here)
- Mirroring – one direction only, so not applicable

Scalability and performance:
- In all solutions, you can expect reasonable pressure on the source (publisher).
- SQL Data Sync adds triggers to the source database, while replication uses a log reader (less pressure).

Monitoring and sync status:
- For Replication – you have the Replication Monitor and the tablediff utility
- For SQL Data Sync and Fabric Mirroring – Monitoring Azure SQL Data Sync using OMS Log Analytics or the Azure SQL Data Sync Health Checker

Real-time vs. batch synchronization:
All the solutions are well suited to real-time and short transactions. Batch operations will also work, but put more pressure on the SQL Server log.
For Data Sync, empty tables provide the best performance at initialization time. If the target table is empty, Data Sync uses bulk insert to load the data; otherwise, it does a row-by-row comparison and insertion to check for conflicts. If performance is not a concern, however, you can set up sync between tables that already contain data.

More information:
- Empty tables provide the best performance

Choosing a DB engine sync solution

Here is a short list of criteria that might help you choose a solution:

SQL Data Sync:
- The best solution for Azure SQL Database
- Portal/script managed
- The target should be from the SQL Server family

Replication:
- The only solution for Azure SQL Managed Instance
- Customizable (filtering, schema changes)
- Deep database knowledge required

Fabric Mirroring:
- Your solution when the destination can be (or is preferred to be) in delta lake table format
- Supports multiple sources (Azure SQL, Cosmos DB, Snowflake, more to come)
- Portal/script managed

More information:
- Compare SQL Data Sync with Transactional Replication

Conclusion

In the realm of data management, the need to synchronize relational data across environments arises from diverse business and technical requirements. This article has delved into the various solutions available, with a particular focus on database engine-based synchronization in the Azure ecosystem.

From the high-level demands of scenarios such as data warehouse, data mart, highly utilized DB, high availability, and disaster recovery, to the intricacies of choosing between ETL, DB engine full, and DB engine sync solutions, we have explored the landscape of options available.

Within the family of DB engine sync solutions, we have highlighted the importance of aligning your choice with specific business needs. Replication, a mature technology, offers low latency and supports various scenarios, though it requires vigilant monitoring. SQL Data Sync provides bi-directional synchronization for a considerable number of tables and can be managed by Azure professionals, while Microsoft Fabric's Mirroring offers a unique approach for reporting scenarios.

Considerations such as identity and security, cost implications, conflict resolution, scalability, and monitoring have been discussed to provide a holistic view. Whether you prioritize low latency, transactional consistency, or ease of management, choosing the right solution is paramount.

As you navigate the complexities of relational data synchronization, keep in mind the nuances of each solution and the unique demands of your project. Whether opting for a well-established solution like replication or embracing innovative approaches like Mirroring with Microsoft Fabric, make an informed decision based on your specific use case.

In conclusion, successful data synchronization is not a one-size-fits-all endeavor. By understanding the characteristics, advantages, and limitations of each solution, you empower yourself to make informed decisions that align with the dynamics of your data ecosystem. Explore further, stay updated on evolving technologies, and tailor your approach to meet the ever-evolving demands of your business.

Remember that the technology world in general, and the cloud in particular, is constantly changing. The dynamic nature of data management and the importance of staying abreast of evolving technologies only emphasize that you should keep exploring emerging solutions and best practices.