"}},"component({\"componentId\":\"custom.widget.MicrosoftFooter\"})":{"__typename":"Component","render({\"context\":{\"component\":{\"entities\":[],\"props\":{}},\"page\":{\"entities\":[\"board:MachineLearningBlog\",\"message:4362539\"],\"name\":\"BlogMessagePage\",\"props\":{},\"url\":\"https://techcommunity.microsoft.com\"}}})":{"__typename":"ComponentRenderResult","html":""}},"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/community/NavbarDropdownToggle\"]})":[{"__ref":"CachedAsset:text:en_US-components/community/NavbarDropdownToggle-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageCoverImage\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageCoverImage-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeTitle\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeTitle-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageTimeToRead\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageTimeToRead-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageSubject\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageSubject-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/users/UserLink\"]})":[{"__ref":"CachedAsset:text:en_US-components/users/UserLink-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/users/UserRank\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/users/UserRank-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"com
ponents/messages/MessageTime\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageTime-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageBody\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageBody-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageCustomFields\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageCustomFields-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageRevision\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageRevision-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/common/QueryHandler\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/common/QueryHandler-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageReplyButton\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageReplyButton-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageAuthorBio\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageAuthorBio-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/users/UserAvatar\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/users/UserAvatar-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/ranks/UserRankLabel\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/ranks/UserRankLabel-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/
users/UserRegistrationDate\"]})":[{"__ref":"CachedAsset:text:en_US-components/users/UserRegistrationDate-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeAvatar\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeAvatar-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeDescription\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeDescription-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"components/tags/TagView/TagViewChip\"]})":[{"__ref":"CachedAsset:text:en_US-components/tags/TagView/TagViewChip-1737128950293"}],"cachedText({\"lastModified\":\"1737128950293\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeIcon\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeIcon-1737128950293"}]},"CachedAsset:pages-1737128934830":{"__typename":"CachedAsset","id":"pages-1737128934830","value":[{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"BlogViewAllPostsPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId/all-posts/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"CasePortalPage","type":"CASE_PORTAL","urlPath":"/caseportal","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"CreateGroupHubPage","type":"GROUP_HUB","urlPath":"/groups/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"CaseViewPage","type":"CASE_DETAILS","urlPath":"/case/:caseId/:caseNumber","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverrid
e":null,"page":{"id":"InboxPage","type":"COMMUNITY","urlPath":"/inbox","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"HelpFAQPage","type":"COMMUNITY","urlPath":"/help","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"IdeaMessagePage","type":"IDEA_POST","urlPath":"/idea/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"IdeaViewAllIdeasPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/all-ideas/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"LoginPage","type":"USER","urlPath":"/signin","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"BlogPostPage","type":"BLOG","urlPath":"/category/:categoryId/blogs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ThemeEditorPage","type":"COMMUNITY","urlPath":"/designer/themes","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TkbViewAllArticlesPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId/all-articles/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730142000000,"localOverride":null,"page":{"id":"AllEvents","type":"CUSTOM","urlPath":"/Events","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"OccasionEditPage","type":"EVENT","urlPath":"/event/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdate
dTime":1737128934830,"localOverride":null,"page":{"id":"OAuthAuthorizationAllowPage","type":"USER","urlPath":"/auth/authorize/allow","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"PageEditorPage","type":"COMMUNITY","urlPath":"/designer/pages","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"PostPage","type":"COMMUNITY","urlPath":"/category/:categoryId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ForumBoardPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TkbBoardPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"EventPostPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"UserBadgesPage","type":"COMMUNITY","urlPath":"/users/:login/:userId/badges","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"GroupHubMembershipAction","type":"GROUP_HUB","urlPath":"/membership/join/:nodeId/:membershipType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"IdeaReplyPage","type":"IDEA_REPLY","urlPath":"/idea/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"UserSettingsPage","type":"USER","urlPath":"/my
settings/:userSettingsTab","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"GroupHubsPage","type":"GROUP_HUB","urlPath":"/groups","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ForumPostPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"OccasionRsvpActionPage","type":"OCCASION","urlPath":"/event/:boardId/:messageSubject/:messageId/rsvp/:responseType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"VerifyUserEmailPage","type":"USER","urlPath":"/verifyemail/:userId/:verifyEmailToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"AllOccasionsPage","type":"OCCASION","urlPath":"/category/:categoryId/events/:boardId/all-events/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"EventBoardPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TkbReplyPage","type":"TKB_REPLY","urlPath":"/kb/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"IdeaBoardPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"CommunityGuideLinesPage","type":"COMMUNITY","urlPath":"/communityguidelines","__typename":"Pag
eDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"CaseCreatePage","type":"SALESFORCE_CASE_CREATION","urlPath":"/caseportal/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TkbEditPage","type":"TKB","urlPath":"/kb/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ForgotPasswordPage","type":"USER","urlPath":"/forgotpassword","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"IdeaEditPage","type":"IDEA","urlPath":"/idea/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TagPage","type":"COMMUNITY","urlPath":"/tag/:tagName","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"BlogBoardPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"OccasionMessagePage","type":"OCCASION_TOPIC","urlPath":"/event/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ManageContentPage","type":"COMMUNITY","urlPath":"/managecontent","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ClosedMembershipNodeNonMembersPage","type":"GROUP_HUB","urlPath":"/closedgroup/:groupHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"CommunityPage","type":"COMMUNITY","u
rlPath":"/","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ForumMessagePage","type":"FORUM_TOPIC","urlPath":"/discussions/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"IdeaPostPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730142000000,"localOverride":null,"page":{"id":"CommunityHub.Page","type":"CUSTOM","urlPath":"/Directory","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"BlogMessagePage","type":"BLOG_ARTICLE","urlPath":"/blog/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"RegistrationPage","type":"USER","urlPath":"/register","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"EditGroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ForumEditPage","type":"FORUM","urlPath":"/discussions/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ResetPasswordPage","type":"USER","urlPath":"/resetpassword/:userId/:resetPasswordToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730142000000,"localOverride":null,"page":{"id":"AllBlogs.Page","type":"CUSTOM","urlPath":"/blogs","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TkbMessagePa
ge","type":"TKB_ARTICLE","urlPath":"/kb/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"BlogEditPage","type":"BLOG","urlPath":"/blog/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ManageUsersPage","type":"USER","urlPath":"/users/manage/:tab?/:manageUsersTab?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ForumReplyPage","type":"FORUM_REPLY","urlPath":"/discussions/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"PrivacyPolicyPage","type":"COMMUNITY","urlPath":"/privacypolicy","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"NotificationPage","type":"COMMUNITY","urlPath":"/notifications","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"UserPage","type":"USER","urlPath":"/users/:login/:userId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"OccasionReplyPage","type":"OCCASION_REPLY","urlPath":"/event/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ManageMembersPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/manage/:tab?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"SearchResultsPage","type":"COMMUNITY","urlPath":"/search","__typename":"PageDescriptor"},"__typename":"P
ageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"BlogReplyPage","type":"BLOG_REPLY","urlPath":"/blog/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"GroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TermsOfServicePage","type":"COMMUNITY","urlPath":"/termsofservice","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"CategoryPage","type":"CATEGORY","urlPath":"/category/:categoryId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"ForumViewAllTopicsPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId/all-topics/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"TkbPostPage","type":"TKB","urlPath":"/category/:categoryId/kbs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1737128934830,"localOverride":null,"page":{"id":"GroupHubPostPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"}],"localOverride":false},"CachedAsset:text:en_US-components/context/AppContext/AppContextProvider-0":{"__typename":"CachedAsset","id":"text:en_US-components/context/AppContext/AppContextProvider-0","value":{"noCommunity":"Cannot find community","noUser":"Cannot find current user","noNode":"Cannot find node with id {nodeId}","noMessage":"Cannot find message with id 
{messageId}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-0":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-0","value":{"title":"Loading..."},"localOverride":false},"User:user:-1":{"__typename":"User","id":"user:-1","uid":-1,"login":"Deleted","email":"","avatar":null,"rank":null,"kudosWeight":1,"registrationData":{"__typename":"RegistrationData","status":"ANONYMOUS","registrationTime":null,"confirmEmailStatus":false,"registrationAccessLevel":"VIEW","ssoRegistrationFields":[]},"ssoId":null,"profileSettings":{"__typename":"ProfileSettings","dateDisplayStyle":{"__typename":"InheritableStringSettingWithPossibleValues","key":"layout.friendly_dates_enabled","value":"false","localValue":"true","possibleValues":["true","false"]},"dateDisplayFormat":{"__typename":"InheritableStringSetting","key":"layout.format_pattern_date","value":"MMM dd yyyy","localValue":"MM-dd-yyyy"},"language":{"__typename":"InheritableStringSettingWithPossibleValues","key":"profile.language","value":"en-US","localValue":"en","possibleValues":["en-US"]}},"deleted":false},"Theme:customTheme1":{"__typename":"Theme","id":"customTheme1"},"Category:category:AI":{"__typename":"Category","id":"category:AI","entityType":"CATEGORY","displayId":"AI","nodeType":"category","depth":3,"title":"Artificial Intelligence and Machine Learning","shortTitle":"Artificial Intelligence and Machine 
Learning","parent":{"__ref":"Category:category:solutions"},"categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:top":{"__typename":"Category","id":"category:top","displayId":"top","nodeType":"category","depth":0,"title":"Top","entityType":"CATEGORY","shortTitle":"Top"},"Category:category:communities":{"__typename":"Category","id":"category:communities","displayId":"communities","nodeType":"category","depth":1,"parent":{"__ref":"Category:category:top"},"title":"Communities","entityType":"CATEGORY","shortTitle":"Communities"},"Category:category:solutions":{"__typename":"Category","id":"category:solutions","displayId":"solutions","nodeType":"category","depth":2,"parent":{"__ref":"Category:category:communities"},"title":"Topics","entityType":"CATEGORY","shortTitle":"Topics"},"Blog:board:MachineLearningBlog":{"__typename":"Blog","id":"board:MachineLearningBlog","entityType":"BLOG","displayId":"MachineLearningBlog","nodeType":"board","depth":4,"conversationStyle":"BLOG","title":"AI - Machine Learning 
Blog","description":"","avatar":null,"profileSettings":{"__typename":"ProfileSettings","language":null},"parent":{"__ref":"Category:category:AI"},"ancestors":{"__typename":"CoreNodeConnection","edges":[{"__typename":"CoreNodeEdge","node":{"__ref":"Community:community:gxcuf89792"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:communities"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:solutions"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:AI"}}]},"userContext":{"__typename":"NodeUserContext","canAddAttachments":false,"canUpdateNode":false,"canPostMessages":false,"isSubscribed":false},"boardPolicies":{"__typename":"BoardPolicies","canPublishArticleOnCreate":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.forums.policy_can_publish_on_create_workflow_action.accessDenied","key":"error.lithium.policies.forums.policy_can_publish_on_create_workflow_action.accessDenied","args":[]}}},"shortTitle":"AI - Machine Learning 
Blog","repliesProperties":{"__typename":"RepliesProperties","sortOrder":"REVERSE_PUBLISH_TIME","repliesFormat":"threaded"},"tagProperties":{"__typename":"TagNodeProperties","tagsEnabled":{"__typename":"PolicyResult","failureReason":null}},"requireTags":false,"tagType":"PRESET_ONLY"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/cmstNC05WEo0blc\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/cmstNC05WEo0blc","height":512,"width":512,"mimeType":"image/png"},"Rank:rank:4":{"__typename":"Rank","id":"rank:4","position":5,"name":"Microsoft","color":"333333","icon":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/cmstNC05WEo0blc\"}"},"rankStyle":"OUTLINE"},"User:user:2080381":{"__typename":"User","id":"user:2080381","uid":2080381,"login":"Priya_Kedia","deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-11.svg"},"rank":{"__ref":"Rank:rank:4"},"email":"","messagesCount":5,"biography":null,"topicsCount":5,"kudosReceivedCount":8,"kudosGivenCount":7,"kudosWeight":1,"registrationData":{"__typename":"RegistrationData","status":null,"registrationTime":"2023-10-12T23:57:55.179-07:00","confirmEmailStatus":null},"followersCount":null,"solutionsCount":0},"BlogTopicMessage:message:4362539":{"__typename":"BlogTopicMessage","uid":4362539,"subject":"Fine-Tuning Small Language Models for Function-Calling: A Comprehensive 
Guide","id":"message:4362539","revisionNum":4,"repliesCount":0,"author":{"__ref":"User:user:2080381"},"depth":0,"hasGivenKudo":false,"board":{"__ref":"Blog:board:MachineLearningBlog"},"conversation":{"__ref":"Conversation:conversation:4362539"},"messagePolicies":{"__typename":"MessagePolicies","canPublishArticleOnEdit":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.forums.policy_can_publish_on_edit_workflow_action.accessDenied","key":"error.lithium.policies.forums.policy_can_publish_on_edit_workflow_action.accessDenied","args":[]}},"canModerateSpamMessage":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.feature.moderation_spam.action.moderate_entity.allowed.accessDenied","key":"error.lithium.policies.feature.moderation_spam.action.moderate_entity.allowed.accessDenied","args":[]}}},"contentWorkflow":{"__typename":"ContentWorkflow","state":"PUBLISH","scheduledPublishTime":null,"scheduledTimezone":null,"userContext":{"__typename":"MessageWorkflowContext","canSubmitForReview":null,"canEdit":false,"canRecall":null,"canSubmitForPublication":null,"canReturnToAuthor":null,"canPublish":null,"canReturnToReview":null,"canSchedule":null},"shortScheduledTimezone":null},"readOnly":false,"editFrozen":false,"moderationData":{"__ref":"ModerationData:moderation_data:4362539"},"teaser":"","body":"

In the rapidly evolving landscape of artificial intelligence, fine-tuning small language models (SLMs) for use case specific workloads has become increasingly essential. The motivation behind this lies in the need for lower latency, reduced memory footprint, and improved accuracy—all while maintaining cost-effectiveness. This blog delves into the reasons for fine-tuning SLMs for function-calling, key considerations, and a practical guide to implementing fine-tuning on Azure.

\n

Why Fine-Tune Small Language Models? 

\n

1. Lower Latency and Reduced Memory Footprint : Smaller models with fewer weights inherently offer faster processing times due to reduced matrix multiplication operations. This lower latency is crucial for real-time applications where speed is paramount. Additionally, these models reduce the memory footprint, making them ideal for deployment in resource-constrained environments. 

\n

  2. Cost Efficiency : Fine-tuning smaller models is more cost-effective than training large models from scratch. It reduces the computational resources required, thereby lowering operational costs. This makes it a viable option for startups and enterprises looking to optimize their AI expenditure. 

\n

 3. Improved Accuracy : By tailoring a model to a specific function-calling use case, you can achieve higher accuracy. Fine-tuning allows the model to learn the intricacies of function-calling, thereby providing more relevant and precise outputs. 

\n

  4. Smaller Token Size : Smaller models and efficient token handling lead to a reduction in token size, which further optimizes processing speed and resource usage. 

\n

Key Considerations for Fine-Tuning 

\n

a. Selection of the Right Base Model : Choosing the appropriate base model is crucial. Evaluate industrial benchmarks and leaderboards, such as the [Berkeley Function Call Leaderboard] to guide your selection. Consider factors like model size, which affects GPU VRAM requirements, accuracy, and context length. For this blog post, we will use the Llama-3.2-3b-instruct model as our base model for fine-tuning.

\n

b. Dataset Preparation : Proper dataset preparation is a cornerstone for successful fine-tuning of SLMs for function-calling tasks. The dataset must be representative of real-world scenarios and cover the full spectrum of use cases you anticipate. For this blog, we will utilize the glaiveai/glaive-function-calling-v2 dataset from Hugging Face, renowned for its comprehensive coverage of simple, multiple, and multi-turn function-calling scenarios across diverse domains.

\n

- Key Steps in Dataset Preparation: Understanding the Complexity of the Use Case

\n

Before diving into the technicalities of dataset preparation, it's essential to understand the complexity of the use case at hand. Is the task limited to function-calling, or does it involve a broader, more generic conversation? If the latter is true, it becomes imperative to ensure that the existing knowledge and capabilities of the small language model (SLM) are preserved. The dataset should seamlessly integrate both function-call and non-function-call scenarios to provide a holistic conversational experience.

\n

Differentiating Function-Calling Scenarios

\n

Let's explore the different scenarios that might arise in function-calling applications:

\n
    \n
  1. Single Function-Calling: This scenario involves invoking a single function based on user input. For instance, in the travel industry, a user might ask, \"What are the available flights from New York to London on December 10th?\" The dataset should include examples that allow the model to extract relevant information and call the flight search function accurately.
  2. \n
  3. Multiple Function-Calling: Here, the language model must choose one function from a set of possible tools. For example, if a user asks, \"Can you book me a hotel or a flight to Paris?\" the dataset should provide instances where the model decides between booking a hotel or a flight based on user preferences or additional input.
  4. \n
  5. Multi-Turn Conversations: This scenario requires tools to be invoked in a sequence based on the conversation's state. Consider a user planning a vacation: \"I want to visit Italy. What are my options?\" followed by \"Book me a flight,\" and then \"Find a hotel in Rome.\" The dataset should capture the flow of conversation, enabling the model to handle each request in context.
  6. \n
  7. Parallel Function-Calling: In situations where multiple tools need to be invoked simultaneously, such as booking flights and hotels at the same time, the dataset should include examples that allow the model to manage these parallel tasks effectively. For instance, \"Book a flight to Tokyo and reserve a hotel in Shinjuku for the same dates.\"
  8. \n
  9. Handling Missing Information: A robust dataset should also include scenarios where the language model needs to ask the user for missing information. For example, if a user simply says, \"Book me a flight,\" the model should prompt, \"Could you please specify the destination and dates?\"
  10. \n
\n

c. Compute Selection 

\n

Ensure your compute setup has adequate VRAM to accommodate model weights, gradients, and activations. The compute should be tailored to your model size and batch size requirements. 

\n

d. Hyperparameter Selection : The selection of hyperparameters is a critical step that can significantly influence the performance of a model. Hyperparameters, unlike the model’s parameters, are not learned from the data but are set before the training process begins. Choosing the right hyperparameters can lead to faster convergence and higher accuracy, making this an area that demands careful attention.

\n

Hyperparameters can be thought of as the settings or knobs that you, as the model trainer, can adjust to tailor the training process. These include learning rate, batch size, the architecture of layers, and more. One of the leading methodologies for fine-tuning models is LORA (Low-Rank Adaptation), which has gained popularity due to its efficiency and effectiveness.

\n

LORA is a technique that allows for the efficient adaptation of large language models by introducing low-rank matrices during the training process. This approach reduces the number of trainable parameters, leading to faster convergence and reduced computational costs.

\n

When using LORA, two primary hyperparameters to consider are:

\n\n

A good starting point for these parameters might be a rank of 8 and an alpha of 16, but these values should be tailored based on the model's complexity and the specific task at hand.

\n

e. Optimize context length :  Another significant aspect of model fine-tuning, especially in function-calling scenarios, is the management of context length. In these prompts, we often provide detailed information such as function names, descriptions, and argument types, which consume a substantial number of tokens. Efficiently managing this context can lead to performance gains without sacrificing accuracy.

\n

Iterative Experimentation with Context Details: To optimize context length, an iterative experimentation approach is recommended:

\n
    \n
  1. Baseline Experiment: Start by including all possible details—function descriptions, argument types, and more. This serves as your baseline for comparison.
  2. \n
  3. Simplified Contexts: Gradually remove elements from the context:
  4. \n\n
\n

By incrementally simplifying the context, you can identify the minimal necessary context required to maintain performance. 
While conducting these experiments, it is advantageous to utilize previous checkpoints. Instead of starting from the base model for each iteration, use the trained model from the previous step as a starting point. This approach can save time and computational resources, allowing for more efficient experimentation.  

\n

Fine-Tuning on Azure: Step-by-Step   

\n

Now let's run the fine-tuning job while adhering to all the guidelines and instructions shared above: 

\n

1. Create an Azure Machine Learning Workspace: An Azure Machine Learning workspace is your control center for managing all the resources you need to train, deploy, automate, and manage machine learning models. It serves as a central repository for your datasets, compute resources, and models. To get started, you can create a workspace through the Azure portal by navigating to the Azure Machine Learning service and selecting \"Create new workspace.\" Ensure you configure resource group, workspace name, region, and other necessary settings.

\n

2. Create a Compute Instance: To run your Python notebook and execute scripts, you need a compute instance. This virtual machine in Azure Machine Learning allows you to perform data preparation, training, and experimentation. Go to the \"Compute\" section in your workspace, select \"Create,\" and choose a compute instance that fits your needs, ensuring it has the necessary specifications for your workload.

\n

3: Dataset Preparation: For this blog, we'll use the glaiveai/glaive-function-calling-v2 dataset from Hugging Face, which includes simple, multi-turn function calling and generic conversations across various domains. The dataset needs to be formatted to be compatible with the OpenAI format:

\n\n
def parse_conversation(input_string):  \n    \n    ROLE_MAPPING = {\"USER\" : \"user\", \"ASSISTANT\" : \"assistant\", \"SYSTEM\" : \"system\", \"FUNCTION RESPONSE\" : \"tool\"}\n\n    # Regular expression to split the conversation based on SYSTEM, USER, and ASSISTANT  \n    pattern = r\"(SYSTEM|USER|ASSISTANT|FUNCTION RESPONSE):\"  \n      \n    # Split the input string and keep the delimiters  \n    parts = re.split(pattern, input_string)  \n      \n    # Initialize the list to store conversation entries  \n    conversation = []  \n      \n    # Iterate over the parts, skipping the first empty string  \n    for i in range(1, len(parts), 2):  \n        role = parts[i].strip()  \n        content = parts[i + 1].strip()  \n        content = content.replace(\"<|endoftext|>\", \"\").strip()\n\n        if content.startswith('<functioncall>'):  # build structured data for function call\n                # try to turn function call from raw text to structured data\n                content = content.replace('<functioncall>', '').strip()\n                # replace single quotes with double quotes for valid JSON\n                clean_content = content.replace(\"'{\", '{').replace(\"'}\", '}')\n                data_json = json.loads(clean_content)\n                # Make it compatible with openAI prompt format\n                func_call = {'recipient_name': f\"functions.{data_json['name']}\", 'parameters': data_json['arguments']}\n                content = {'tool_uses': [func_call]}\n          \n        # Append a dictionary with the role and content to the conversation list  \n        conversation.append({\"role\": ROLE_MAPPING[role], \"content\": content})  \n      \n    return conversation  \n\ndef prepare_dataset(tokenizer, args):\n    \n    # Create the cache_dir\n    cache_dir = \"./outputs/dataset\"\n    os.makedirs(cache_dir, exist_ok = True)\n\n    # Load the dataset from disk\n    train_dataset = load_from_disk(args.train_dir) \n    eval_dataset = 
load_from_disk(args.val_dir)\n\n    column_names = list(train_dataset.features)\n\n    def apply_chat_template(examples):\n        conversations = []\n        for system, chat in zip(examples[\"system\"], examples[\"chat\"]):\n            try:\n                system_message = parse_conversation(system)\n                chat_message = parse_conversation(chat)\n                message = system_message + chat_message\n                conversations.append(message)\n            except Exception as e:\n                print(e) \n\n        text = [tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=False) for message in conversations]\n        return {\"text\": text}\n\n    # process the dataseta and drop unused columns\n    processed_train_dataset = train_dataset.map(apply_chat_template, cache_file_name = f\"{cache_dir}/cache.arrow\", batched = True, remove_columns=column_names)\n    processed_eval_dataset = eval_dataset.map(apply_chat_template, cache_file_name = f\"{cache_dir}/cache.arrow\", batched = True, remove_columns=column_names)\n\n    return processed_train_dataset, processed_eval_dataset\n
\n

4: Create a Data Asset: Azure Machine Learning allows you to register datasets as data assets, making them easily manageable and reusable:

\n
def get_or_create_data_asset(ml_client, data_name, data_local_dir, update=False):\n    \n    try:\n        latest_data_version = max([int(d.version) for d in ml_client.data.list(name=data_name)])\n        if update:\n            raise ResourceExistsError('Found Data asset, but will update the Data.')            \n        else:\n            data_asset = ml_client.data.get(name=data_name, version=latest_data_version)\n            logger.info(f\"Found Data asset: {data_name}. Will not create again\")\n    except (ResourceNotFoundError, ResourceExistsError) as e:\n        data = Data(\n            path=data_local_dir,\n            type=AssetTypes.URI_FOLDER,\n            description=f\"{data_name} for fine tuning\",\n            tags={\"FineTuningType\": \"Instruction\", \"Language\": \"En\"},\n            name=data_name\n        )\n        data_asset = ml_client.data.create_or_update(data)\n        logger.info(f\"Created/Updated Data asset: {data_name}\")\n        \n    return data_asset\n\ntrain_data = get_or_create_data_asset(ml_client, f\"{AZURE_DATA_NAME}_train\", data_local_dir=f\"{DATA_DIR}/train\", update=True)\nval_data = get_or_create_data_asset(ml_client, f\"{AZURE_DATA_NAME}_val\", data_local_dir=f\"{DATA_DIR}/val\", update=True)\ntest_data = get_or_create_data_asset(ml_client, f\"{AZURE_DATA_NAME}_test\", data_local_dir=f\"{DATA_DIR}/test\", update=True)
\n

5: Create an Environment: While Azure provides built-in environments for common use cases, creating a custom environment tailored to your specific needs can be beneficial.

\n

An environment in Azure ML is essentially a containerized setup that defines the software, libraries, and other dependencies required to run your machine learning workload.

\n

Why Use Environments?

\n
    \n
  1. Reproducibility: By defining an environment, you ensure that your training and inference processes are reproducible, with the same configuration used every time.
  2. \n
  3. Consistency: Environments help maintain consistency across different runs and teams, reducing \"it works on my machine\" problems.
  4. \n
  5. Portability: They encapsulate your dependencies, making it easier to move and share your ML projects across different Azure services or even with external collaborators.
  6. \n
\n
%%writefile {CLOUD_DIR}/train/Dockerfile\n\nFROM mcr.microsoft.com/aifx/acpt/stable-ubuntu2004-cu124-py310-torch241:biweekly.202410.2\n\nUSER root\n\n# support Deepspeed launcher requirement of passwordless ssh login\nRUN apt-get update && apt-get -y upgrade\nRUN pip install --upgrade pip\nRUN apt-get install -y openssh-server openssh-client\n\n# Install pip dependencies\nCOPY requirements.txt .\nRUN pip install -r requirements.txt --no-cache-dir\n\nRUN MAX_JOBS=4 pip install flash-attn==2.6.3 --no-build-isolation
def get_or_create_docker_environment_asset(ml_client, env_name, docker_dir, update=False):\n    \n    try:\n        latest_env_version = max([int(e.version) for e in ml_client.environments.list(name=env_name)])\n        if update:\n            raise ResourceExistsError('Found Environment asset, but will update the Environment.')\n        else:\n            env_asset = ml_client.environments.get(name=env_name, version=latest_env_version)\n            print(f\"Found Environment asset: {env_name}. Will not create again\")\n    except (ResourceNotFoundError, ResourceExistsError) as e:\n        print(f\"Exception: {e}\")\n        env_docker_image = Environment(\n            build=BuildContext(path=docker_dir),\n            name=env_name,\n            description=\"Environment created from a Docker context.\",\n        )\n        env_asset = ml_client.environments.create_or_update(env_docker_image)\n        print(f\"Created Environment asset: {env_name}\")\n    \n    return env_asset\n\nenv = get_or_create_docker_environment_asset(ml_client, azure_env_name, docker_dir=f\"{CLOUD_DIR}/train\", update=False)
\n

Reference : training.ipynb

\n

6: Create a Training Script: Your training script will handle the fine-tuning process and log metrics using MLflow, which is tightly integrated with Azure Machine Learning. This involves - Loading the dataset, defining the model architecture, writing functions to track and log metrics such as training and evaluation loss.

\n
def main(args):\n\n     ###################\n    # Hyper-parameters\n    ###################\n    # Only overwrite environ if wandb param passed\n    if len(args.wandb_project) > 0:\n        os.environ['WANDB_API_KEY'] = args.wandb_api_key    \n        os.environ[\"WANDB_PROJECT\"] = args.wandb_project\n    if len(args.wandb_watch) > 0:\n        os.environ[\"WANDB_WATCH\"] = args.wandb_watch\n    if len(args.wandb_log_model) > 0:\n        os.environ[\"WANDB_LOG_MODEL\"] = args.wandb_log_model\n        \n    use_wandb = len(args.wandb_project) > 0 or (\"WANDB_PROJECT\" in os.environ and len(os.environ[\"WANDB_PROJECT\"]) > 0) \n        \n    training_config = {\"per_device_train_batch_size\" : args.train_batch_size,  # Controls the batch size per device\n                       \"per_device_eval_batch_size\" : args.eval_batch_size,    # Controls the batch size for evaluation\n                       \"gradient_accumulation_steps\" : args.grad_accum_steps,\n                       \"warmup_ratio\" : args.warmup_ratio,  # Controls the ratio of warmup steps\n                        \"learning_rate\" : args.learning_rate,  \n                        \"fp16\" : not torch.cuda.is_bf16_supported(),\n                        \"bf16\" : torch.cuda.is_bf16_supported(),\n                        \"optim\" : \"adamw_8bit\",\n                        \"lr_scheduler_type\" : args.lr_scheduler_type,\n                        \"output_dir\" : args.output_dir,\n                        \"logging_steps\": args.logging_steps,\n                        \"logging_strategy\": \"epoch\",\n                        \"save_steps\": args.save_steps,\n                        \"eval_strategy\": \"epoch\",\n                        \"num_train_epochs\": args.epochs,\n                        # \"load_best_model_at_end\": True,\n                        \"save_only_model\": False,\n                        \"seed\" : 0\n    }\n\n    peft_config = {\n        \"r\": args.lora_r,\n        \"lora_alpha\": 
args.lora_alpha,\n        \"lora_dropout\": args.lora_dropout,\n        \"bias\": \"none\",\n        #\"target_modules\": \"all-linear\",\n        \"target_modules\": [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n        \"modules_to_save\": None,\n        \"use_gradient_checkpointing\": \"unsloth\",\n        \"use_rslora\": False,\n        \"loftq_config\": None,\n    }\n\n    checkpoint_dir = os.path.join(args.output_dir, \"checkpoints\")\n\n    train_conf = TrainingArguments(\n        **training_config,\n        report_to=\"wandb\" if use_wandb else \"azure_ml\",\n        run_name=args.wandb_run_name if use_wandb else None,    \n    )\n\n    model, tokenizer = load_model(args)\n    model = FastLanguageModel.get_peft_model(model, **peft_config)\n\n    ###############\n    # Setup logging\n    ###############\n    logging.basicConfig(\n        format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n        datefmt=\"%Y-%m-%d %H:%M:%S\",\n        handlers=[logging.StreamHandler(sys.stdout)],\n    )\n    log_level = train_conf.get_process_log_level()\n    logger.setLevel(log_level)\n\n    datasets.utils.logging.set_verbosity(log_level)\n    transformers.utils.logging.set_verbosity(log_level)\n    transformers.utils.logging.enable_default_handler()\n    transformers.utils.logging.enable_explicit_format()\n\n    # Log on each process a small summary\n    logger.warning(\n        f\"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}\"\n        + f\" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}\"\n    )\n    logger.info(f\"Training/evaluation parameters {train_conf}\")\n    logger.info(f\"PEFT parameters {peft_config}\") \n\n    # Load the dataset\n    train_dataset, eval_dataset = prepare_dataset(tokenizer, args)\n\n     ###########\n    # Training\n    ###########\n    trainer = SFTTrainer(\n        model=model,\n       
 args=train_conf,\n        tokenizer = tokenizer,\n        train_dataset=train_dataset,\n        eval_dataset=eval_dataset,\n        dataset_text_field=\"text\",\n        packing = False        # Can make training 5x faster for shorter responses\n    )\n\n    # Show current memory stats\n    gpu_stats = torch.cuda.get_device_properties(0)\n    start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n    max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n    logger.info(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n    logger.info(f\"{start_gpu_memory} GB of memory reserved.\")\n\n    last_checkpoint = None\n    if os.path.isdir(checkpoint_dir):\n        checkpoints = [os.path.join(checkpoint_dir, d) for d in os.listdir(checkpoint_dir)]\n        if len(checkpoints) > 0:\n            checkpoints.sort(key=os.path.getmtime, reverse=True)\n            last_checkpoint = checkpoints[0]  \n    \n    trainer_stats = trainer.train(resume_from_checkpoint=last_checkpoint)\n\n    #############\n    # Evaluation\n    #############\n    tokenizer.padding_side = \"left\"\n    metrics = trainer.evaluate()\n    metrics[\"eval_samples\"] = len(eval_dataset)\n    trainer.log_metrics(\"eval\", metrics)\n    trainer.save_metrics(\"eval\", metrics)\n    \n    # ############\n    # # Save model\n    # ############\n    os.makedirs(args.model_dir, exist_ok=True)\n\n    if args.save_merged_model:\n        print(\"Save PEFT model with merged 16-bit weights\")\n        model.save_pretrained_merged(\"outputs\", tokenizer, save_method=\"merged_16bit\")\n    else:\n        print(f\"Save PEFT model: {args.model_dir}/model\")    \n        model.save_pretrained(f\"{args.model_dir}/model\")\n\n    tokenizer.save_pretrained(args.model_dir)
\n

Reference : train.py

\n

7: Create the Compute Cluster: For this experiment, we are using Standard_NC24ads_A100_v4, which has 1 GPU and 80 GB of VRAM. Select the compute based on the model size and batch size.

\n
from azure.ai.ml.entities import AmlCompute\n\n### Create the compute cluster\ntry:\n    compute = ml_client.compute.get(azure_compute_cluster_name)\n    print(\"The compute cluster already exists! Reusing it for the current run\")\nexcept Exception as ex:\n    print(\n        f\"Looks like the compute cluster doesn't exist. Creating a new one with compute size {azure_compute_cluster_size}!\"\n    )\n    try:\n        print(\"Attempt #1 - Trying to create a dedicated compute\")\n        tier = 'LowPriority' if USE_LOWPRIORITY_VM else 'Dedicated'\n        compute = AmlCompute(\n            name=azure_compute_cluster_name,\n            size=azure_compute_cluster_size,\n            tier=tier,\n            max_instances=1,  # For multi node training set this to an integer value more than 1\n        )\n        ml_client.compute.begin_create_or_update(compute).wait()\n    except Exception as e:\n        print(\"Error\")
\n

8: Submit the Fine-Tuning Job

\n

With everything set up, you can now submit your fine-tuning job:

\n
from azure.ai.ml import command\nfrom azure.ai.ml import Input\nfrom azure.ai.ml.entities import ResourceConfiguration\n\njob = command(\n    inputs=dict(\n        #train_dir=Input(type=\"uri_folder\", path=DATA_DIR), # Get data from local path\n        train_dir=Input(path=f\"{AZURE_DATA_NAME}_train@latest\"),  # Get data from Data asset\n        val_dir = Input(path=f\"{AZURE_DATA_NAME}_val@latest\"),\n        epoch=d['train']['epoch'],\n        train_batch_size=d['train']['train_batch_size'],\n        eval_batch_size=d['train']['eval_batch_size'],  \n    ),\n    code=f\"{CLOUD_DIR}/train\",  # local path where the code is stored\n    compute=azure_compute_cluster_name,\n    command=\"python train_v3.py --train_dir ${{inputs.train_dir}} --val_dir ${{inputs.val_dir}} --train_batch_size ${{inputs.train_batch_size}} --eval_batch_size ${{inputs.eval_batch_size}}\",\n    #environment=\"azureml://registries/azureml/environments/acft-hf-nlp-gpu/versions/77\", # Use built-in Environment asset\n    environment=f\"{azure_env_name}@latest\",\n    distribution={\n        \"type\": \"PyTorch\",\n        \"process_count_per_instance\": 1, # For multi-gpu training set this to an integer value more than 1\n    },\n)\nreturned_job = ml_client.jobs.create_or_update(job)\nml_client.jobs.stream(returned_job.name)
\n

9: Monitor Training Metrics: After initiating the job, keep an eye on the output for key metrics like training loss and evaluation loss. Since we've logged the results to MLflow, which is seamlessly integrated with Azure Machine Learning, we can easily review the loss function by navigating to the metrics tab within the jobs section.

\n\n

Key Takeaways:

\n\n

Overall, it looks promising, so let's go ahead and register the model.

\n

10: Register the Model: After fine-tuning, register the model to make it available for deployment:

\n
from azureml.core import Workspace, Run \nimport os  \n  \n# Connect to your workspace  \nws = Workspace.from_config()  \n  \nexperiment_name =  'experiment_name'\nrun_id = 'job_name'\n\nrun = Run(ws.experiments[experiment_name], run_id)  \n\n# Register the model  \nmodel = run.register_model(  \n    model_name=d[\"serve\"][\"azure_model_name\"],  # this is the name the model will be registered under  \n    model_path=\"outputs\"  # this is the path to the model file in the run's outputs  \n)  \n# Create a local directory to save the outputs  \nlocal_folder = './model_v2'  \nos.makedirs(local_folder, exist_ok=True)  \n  \n# Download the entire outputs folder  \nrun.download_files(prefix='outputs', output_directory=local_folder)  
\n

Step 11: Deploy the Model to a Managed Online Endpoint: Managed online endpoints provide a seamless way to deploy models without managing underlying infrastructure. They offer scalability, versioning, and easy rollback compared to deploying on an Azure Kubernetes Service (AKS) cluster.

\n

11a. Build the environment: For deploying the model to a managed online endpoint, first create the environment with the required dependencies and webserver for inference.

\n
%%writefile {CLOUD_DIR}/serve/Dockerfile\n\nFROM mcr.microsoft.com/aifx/acpt/stable-ubuntu2004-cu124-py310-torch241:biweekly.202410.2\n\n# Install pip dependencies\nCOPY requirements.txt .\nRUN pip install -r requirements.txt --no-cache-dir\n\n# Inference requirements\nCOPY --from=mcr.microsoft.com/azureml/o16n-base/python-assets:20230419.v1 /artifacts /var/\n\nRUN /var/requirements/install_system_requirements.sh && \\\n    cp /var/configuration/rsyslog.conf /etc/rsyslog.conf && \\\n    cp /var/configuration/nginx.conf /etc/nginx/sites-available/app && \\\n    ln -sf /etc/nginx/sites-available/app /etc/nginx/sites-enabled/app && \\\n    rm -f /etc/nginx/sites-enabled/default\nENV SVDIR=/var/runit\nENV WORKER_TIMEOUT=400\nEXPOSE 5001 8883 8888\n\n# support Deepspeed launcher requirement of passwordless ssh login\nRUN apt-get update\nRUN apt-get install -y openssh-server openssh-client\n\nRUN MAX_JOBS=4 pip install flash-attn==2.6.3 --no-build-isolation
\n

Reference : serving.ipynb

\n

11b. Create a serving script: Creating a serve script for inference is a crucial step in deploying your machine learning model to a production environment. This script handles incoming requests, processes input data, runs the model inference, and returns the results. In Azure Machine Learning, the serve script is part of the deployment package for your model, typically used in conjunction with a managed endpoint or a Kubernetes service.

\n

A serve script in Azure ML typically consists of two main functions:

\n
    \n
  1. init(): This function initializes the model and any other necessary resources. It is called once when the deployment is first loaded.
  2. \n
  3. run(data): This function is called every time a request is made to the deployed model. It processes the incoming data, performs inference using the model, and returns the results.
  4. \n
\n
import os\nimport re\nimport json\nimport torch\nimport base64\nimport logging\n\nfrom io import BytesIO\nfrom transformers import AutoTokenizer, AutoProcessor, pipeline\nfrom transformers import AutoModelForCausalLM, AutoProcessor\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef init():\n    \"\"\"\n    This function is called when the container is initialized/started, typically after create/update of the deployment.\n    You can write the logic here to perform init operations like caching the model in memory\n    \"\"\"\n    global model\n    global tokenizer\n    # AZUREML_MODEL_DIR is an environment variable created during deployment.\n    # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)\n    # Please provide your model's folder name if there is one\n    model_name_or_path = os.path.join(\n        os.getenv(\"AZUREML_MODEL_DIR\"), \"outputs\"\n    )\n    \n    model_kwargs = dict(\n        trust_remote_code=True,    \n        device_map={\"\":0},\n        torch_dtype=\"auto\" \n    )\n    \n    model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map ={\"\" : 0}, **model_kwargs)\n    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)    \n\n    logging.info(\"Loaded model.\")\n    \ndef run(json_data: str):\n    logging.info(\"Request received\")\n    data = json.loads(json_data)\n    input_data = data[\"input_data\"]\n    params = data['params']\n\n    pipe = pipeline(\"text-generation\", model = model, tokenizer = tokenizer)\n    output = pipe(input_data, **params)\n    result = output[0][\"generated_text\"]\n    logging.info(f\"Generated text : {result}\")\n    json_result = {\"result\" : str(result)}\n\n    return json_result
\n

Reference : score.py

\n

11c. Create a managed online endpoint and deploy the model to endpoint: Creating an endpoint and deploying your model on Azure Machine Learning is the final step to make your model accessible for real-time inference. This process involves setting up a service that can handle incoming requests, execute the model, and return the results.

\n

Why Create an Endpoint?

\n

An endpoint is a network-accessible interface that allows external applications or users to interact with your deployed machine learning model. Creating an endpoint is crucial for the following reasons:

\n
    \n
  1. Accessibility: Endpoints make your model accessible over the internet or within a secured network, enabling other applications, services, or users to send requests and receive responses.
  2. \n
  3. API Integration: By exposing your model as a RESTful API, endpoints facilitate integration with various applications, allowing seamless communication and data exchange.
  4. \n
  5. Load Management: An endpoint can manage requests from multiple clients, handling concurrent requests and distributing the load appropriately.
  6. \n
  7. Security: Endpoints provide mechanisms for authentication and authorization, ensuring that only authorized users can access the model.
  8. \n
  9. Scalability: Azure-managed endpoints can automatically scale based on demand, ensuring that your model can handle varying workloads without manual intervention.
  10. \n
\n
from azure.ai.ml.entities import (\n    ManagedOnlineEndpoint,\n    IdentityConfiguration,\n    ManagedIdentityConfiguration,\n)\n\nazure_endpoint_name = d['serve']['azure_endpoint_name']\n# Check if the endpoint already exists in the workspace\ntry:\n    endpoint = ml_client.online_endpoints.get(azure_endpoint_name)\n    print(\"---Endpoint already exists---\")\nexcept:\n    # Create an online endpoint if it doesn't exist\n\n    # Define the endpoint\n    endpoint = ManagedOnlineEndpoint(\n        name=azure_endpoint_name,\n        description=f\"Test endpoint for {model.name}\",\n    )\n\n# Trigger the endpoint creation\ntry:\n    ml_client.begin_create_or_update(endpoint).wait()\n    print(\"\\n---Endpoint created successfully---\\n\")\nexcept Exception as err:\n    raise RuntimeError(\n        f\"Endpoint creation failed. Detailed Response:\\n{err}\"\n    ) from err
\n

Why Deploy a Model?

\n

Deployment is the process of transferring your trained machine learning model from a development environment to a production environment where it can serve real-time predictions. Deployment is critical because:

\n
    \n
  1. Operationalization: Deployment operationalizes your model, moving it from an experimental or development phase to a live environment where it can deliver value to end-users or systems.
  2. \n
  3. Resource Allocation: Deploying a model involves configuring the necessary compute resources (such as CPU, memory, and GPUs) to ensure optimal performance during inference.
  4. \n
  5. Environment Consistency: During deployment, the model is packaged with its dependencies in a consistent environment, ensuring reproducibility and minimizing discrepancies between development and production.
  6. \n
  7. Monitoring and Maintenance: Deployment sets up the infrastructure to monitor the model's performance, usage, and health, allowing for ongoing maintenance and updates.
  8. \n
  9. Version Control: Deployment allows you to manage and update different versions of your model, providing flexibility to roll back or switch to newer versions as needed.
    from azure.ai.ml.entities import (    \n    OnlineRequestSettings,\n    CodeConfiguration,\n    ManagedOnlineDeployment,\n    ProbeSettings,\n    Environment\n)\n\nazure_deployment_name = f\"{d['serve']['azure_deployment_name']}-v1\"\n\ndeployment = ManagedOnlineDeployment(\n    name=azure_deployment_name,\n    endpoint_name=azure_endpoint_name,\n    model=model,\n    instance_type=azure_compute_cluster_size,\n    instance_count=1,\n    #code_configuration=code_configuration,\n    environment = env,\n    scoring_script=\"score.py\",\n    code_path=f\"./{CLOUD_DIR}/inference\",\n    #environment_variables=deployment_env_vars,\n    request_settings=OnlineRequestSettings(max_concurrent_requests_per_instance=20,\n                                           request_timeout_ms=90000, max_queue_wait_ms=60000),\n    liveness_probe=ProbeSettings(\n        failure_threshold=30,\n        success_threshold=1,\n        period=100,\n        initial_delay=500,\n    ),\n    readiness_probe=ProbeSettings(\n        failure_threshold=30,\n        success_threshold=1,\n        period=100,\n        initial_delay=500,\n    ),\n)\n\n# Trigger the deployment creation\ntry:\n    ml_client.begin_create_or_update(deployment).wait()\n    print(\"\\n---Deployment created successfully---\\n\")\nexcept Exception as err:\n    raise RuntimeError(\n        f\"Deployment creation failed. Detailed Response:\\n{err}\"\n    ) from err\n    \nendpoint.traffic = {azure_deployment_name: 100}\nendpoint_poller = ml_client.online_endpoints.begin_create_or_update(endpoint)  
  10. \n
\n

Step 12: Run Inference on Sample Data: Test the deployed model using sample data that expects function calls:

\n
import json\nimport os \n\nsample = {\n    \"input_data\": \n        [\n            {'role': 'system', 'content': 'You are an helpful assistant who has access to the following functions to help the user, you can use the functions if needed- { \"name\": \"calculate_shipping_cost\", \"description\": \"Calculate the cost of shipping a package\", \"parameters\": { \"type\": \"object\", \"properties\": { \"weight\": { \"type\": \"number\", \"description\": \"The weight of the package in pounds\" }, \"destination\": { \"type\": \"string\", \"description\": \"The destination of the package\" } }, \"required\": [ \"weight\", \"destination\" ] }}}\"'},\n            {'role': 'user', 'content': 'Can you help me with shipping cost for a package?'},\n            {'role': 'assistant', 'content': 'Sure! I can help you with that. Please provide me with the weight and destination of the package.'},\n            {'role': 'user', 'content': 'The weight of the package is 10 pounds and the destination is New York.'}\n        ],\n    \"params\": {\n        \"temperature\": 0.1,\n        \"max_new_tokens\": 512,\n        \"do_sample\": True,\n        \"return_full_text\": False\n    }\n}\n# Dump the sample data into a json file\nwith open(request_file, \"w\") as f:\n    json.dump(sample, f)\n\nresult = ml_client.online_endpoints.invoke(\n    endpoint_name=azure_endpoint_name,\n    deployment_name=azure_deployment_name,\n    request_file=request_file\n)\n\nresult_json = json.loads(result)\nresult = result_json['result']\n\nprint(result)
\n

Step 13: Compare with Base Model: Now, let's run the same sample through the base model to observe the difference in performance. As we can see, while the fine-tuned model did a perfect job of generating a response with the right function and arguments, the base model struggles to generate the desired output.

\n

Step 14: Rerun the fine-tuning job by removing function descriptions from the system message: Now, let's rerun the experiment, but this time we will drop the function description from the dataset for context length optimization.

\n
def remove_desc_from_prompts(data):\n    system_message = data['system']\n    pattern = r'\"description\":\\s*\"[^\"]*\",?\\n?'  \n    \n    # Remove the \"description\" fields  \n    cleaned_string = re.sub(pattern, '\"description\":\"\",', system_message)  \n\n    return cleaned_string\n\n## Update the system message by removing function descriptions and argument description\ntrain_dataset = train_dataset.map(lambda x : {\"updated_system\" : remove_desc_from_prompts(x)}, remove_columns = [\"system\"])\ntest_dataset = test_dataset.map(lambda x : {\"updated_system\" : remove_desc_from_prompts(x)}, remove_columns = [\"system\"])\nval_dataset = val_dataset.map(lambda x : {\"updated_system\" : remove_desc_from_prompts(x)}, remove_columns = [\"system\"])\n\ntrain_dataset.save_to_disk(f\"{DATA_DIR}/train\")\ntest_dataset.save_to_disk(f\"{DATA_DIR}/test\")\nval_dataset.save_to_disk(f\"{DATA_DIR}/val\")
\n

Reference : preprocess.py

\n

As can be seen from the results, removing the function description doesn't degrade the model's performance; instead, this fine-tuned model version requires fewer input tokens, resulting in a significant reduction in token consumption with improved latency.

\n\n

Step 15: Further Exploration: Consider removing arguments or even the function itself in subsequent experiments to evaluate performance.

\n

Conclusion

\n

This blog post has walked through the process of fine-tuning an SLM for function-calling on Azure Machine Learning. By following these steps, you can effectively tailor a model to meet specific functional requirements.

\n

You can access the full code here.

\n

For a deeper dive into evaluating fine-tuned models, including metrics and code samples, check out the next blog post. By leveraging Azure's powerful tools, you can streamline the development and deployment of machine learning models, making them more efficient and effective for your specific tasks.

\n

Reference:

\n

Fine tuning for function calling | OpenAI Cookbook

\n

Fine-tuning function calls with Azure OpenAI Service - Azure AI services | Microsoft Learn

\n

michaelnny/Llama3-FunctionCalling: Fine-tune Llama3 model to support function calling

\n

Fine Tuning LLMs for Function Calling w/Pawel Garbacki - YouTube

\n

slm-innovator-lab/2_slm-fine-tuning-mlstudio at main · Azure/slm-innovator-lab

","body@stringLength":"46977","rawBody":"

In the rapidly evolving landscape of artificial intelligence, fine-tuning small language models (SLMs) for use case specific workloads has become increasingly essential. The motivation behind this lies in the need for lower latency, reduced memory footprint, and improved accuracy—all while maintaining cost-effectiveness. This blog delves into the reasons for fine-tuning SLMs for function-call, key considerations, and a practical guide to implementing fine-tuning on Azure

\n

Why Fine-Tune Small Language Models? 

\n

1. Lower Latency and Reduced Memory Footprint : Smaller models with fewer weights inherently offer faster processing times due to reduced matrix multiplication operations. This lower latency is crucial for real-time applications where speed is paramount. Additionally, these models reduce the memory footprint, making them ideal for deployment in resource-constrained environments. 

\n

  2. Cost Efficiency : Fine-tuning smaller models is more cost-effective than training large models from scratch. It reduces the computational resources required, thereby lowering operational costs. This makes it a viable option for startups and enterprises looking to optimize their AI expenditure. 

\n

 3. Improved Accuracy : By tailoring a model to a specific function-calling use case, you can achieve higher accuracy. Fine-tuning allows the model to learn the intricacies of function-calling, thereby providing more relevant and precise outputs. 

\n

  4. Smaller Token Size : Smaller models and efficient token handling lead to a reduction in token size, which further optimizes processing speed and resource usage. 

\n

Key Considerations for Fine-Tuning 

\n

a. Selection of the Right Base Model : Choosing the appropriate base model is crucial. Evaluate industrial benchmarks and leaderboards, such as the [Berkeley Function Call Leaderboard], to guide your selection. Consider factors like model size, which affects GPU VRAM requirements, accuracy, and context length. For this blog post, we will use the Llama-3.2-3b-instruct model as our base model for fine-tuning.

\n

b. Dataset Preparation : Proper dataset preparation is a cornerstone for successful fine-tuning of SLMs for function-calling tasks. The dataset must be representative of real-world scenarios and cover the full spectrum of use cases you anticipate. For this blog, we will utilize the glaiveai/glaive-function-calling-v2 dataset from Hugging Face, renowned for its comprehensive coverage of simple, multiple, and multi-turn function-calling scenarios across diverse domains.

\n

- Key Steps in Dataset Preparation: Understanding the Complexity of the Use Case

\n

Before diving into the technicalities of dataset preparation, it's essential to understand the complexity of the use case at hand. Is the task limited to function-calling, or does it involve a broader, more generic conversation? If the latter is true, it becomes imperative to ensure that the existing knowledge and capabilities of the language model (SLM) are preserved. The dataset should seamlessly integrate both function-call and non-function-call scenarios to provide a holistic conversational experience.

\n

Differentiating Function-Calling Scenarios

\n

Let's explore the different scenarios that might arise in function-calling applications:

\n
    \n
  1. Single Function-Calling: This scenario involves invoking a single function based on user input. For instance, in the travel industry, a user might ask, \"What are the available flights from New York to London on December 10th?\" The dataset should include examples that allow the model to extract relevant information and call the flight search function accurately.
  2. \n
  3. Multiple Function-Calling: Here, the language model must choose one function from a set of possible tools. For example, if a user asks, \"Can you book me a hotel or a flight to Paris?\" the dataset should provide instances where the model decides between booking a hotel or a flight based on user preferences or additional input.
  4. \n
  5. Multi-Turn Conversations: This scenario requires tools to be invoked in a sequence based on the conversation's state. Consider a user planning a vacation: \"I want to visit Italy. What are my options?\" followed by \"Book me a flight,\" and then \"Find a hotel in Rome.\" The dataset should capture the flow of conversation, enabling the model to handle each request in context.
  6. \n
  7. Parallel Function-Calling: In situations where multiple tools need to be invoked simultaneously, such as booking flights and hotels at the same time, the dataset should include examples that allow the model to manage these parallel tasks effectively. For instance, \"Book a flight to Tokyo and reserve a hotel in Shinjuku for the same dates.\"
  8. \n
  9. Handling Missing Information: A robust dataset should also include scenarios where the language model needs to ask the user for missing information. For example, if a user simply says, \"Book me a flight,\" the model should prompt, \"Could you please specify the destination and dates?\"
  10. \n
\n

c. Compute Selection 

\n

Ensure your compute setup has adequate VRAM to accommodate model weights, gradients, and activations. The compute should be tailored to your model size and batch size requirements. 

\n

d. Hyperparameter Selection : The selection of hyperparameters is a critical step that can significantly influence the performance of a model. Hyperparameters, unlike the model’s parameters, are not learned from the data but are set before the training process begins. Choosing the right hyperparameters can lead to faster convergence and higher accuracy, making this an area that demands careful attention.

\n

Hyperparameters can be thought of as the settings or knobs that you, as the model trainer, can adjust to tailor the training process. These include learning rate, batch size, the architecture of layers, and more. One of the leading methodologies for fine-tuning models is LORA (Low-Rank Adaptation), which has gained popularity due to its efficiency and effectiveness.

\n

LORA is a technique that allows for the efficient adaptation of large language models by introducing low-rank matrices during the training process. This approach reduces the number of trainable parameters, leading to faster convergence and reduced computational costs.

\n

When using LORA, two primary hyperparameters to consider are:

\n\n

A good starting point for these parameters might be a rank of 8 and an alpha of 16, but these values should be tailored based on the model's complexity and the specific task at hand.

\n

e. Optimize context length :  Another significant aspect of model fine-tuning, especially in function-calling scenarios, is the management of context length. In these prompts, we often provide detailed information such as function names, descriptions, and argument types, which consume a substantial number of tokens. Efficiently managing this context can lead to performance gains without sacrificing accuracy.

\n

Iterative Experimentation with Context Details: To optimize context length, an iterative experimentation approach is recommended:

\n
    \n
  1. Baseline Experiment: Start by including all possible details—function descriptions, argument types, and more. This serves as your baseline for comparison.
  2. \n
  3. Simplified Contexts: Gradually remove elements from the context:
  4. \n\n
\n

By incrementally simplifying the context, you can identify the minimal necessary details required to maintain model performance. 
While conducting these experiments, it is advantageous to utilize previous checkpoints. Instead of starting from the base model for each iteration, use the trained model from the previous step as a starting point. This approach can save time and computational resources, allowing for more efficient experimentation.  

\n

Fine-Tuning on Azure: Step-by-Step   

\n

Now let's run the fine-tuning job while adhering to all the guidelines and instructions shared above: 

\n

1. Create an Azure Machine Learning Workspace: An Azure Machine Learning workspace is your control center for managing all the resources you need to train, deploy, automate, and manage machine learning models. It serves as a central repository for your datasets, compute resources, and models. To get started, you can create a workspace through the Azure portal by navigating to the Azure Machine Learning service and selecting \"Create new workspace.\" Ensure you configure resource group, workspace name, region, and other necessary settings.

\n

2. Create a Compute Instance: To run your Python notebook and execute scripts, you need a compute instance. This virtual machine in Azure Machine Learning allows you to perform data preparation, training, and experimentation. Go to the \"Compute\" section in your workspace, select \"Create,\" and choose a compute instance that fits your needs, ensuring it has the necessary specifications for your workload.

\n

3. Dataset Preparation: For this blog, we'll use the glaiveai/glaive-function-calling-v2 dataset from Hugging Face, which includes simple, multi-turn function calling and generic conversations across various domains. The dataset needs to be formatted to be compatible with the OpenAI format:

\n\ndef parse_conversation(input_string): \n \n ROLE_MAPPING = {\"USER\" : \"user\", \"ASSISTANT\" : \"assistant\", \"SYSTEM\" : \"system\", \"FUNCTION RESPONSE\" : \"tool\"}\n\n # Regular expression to split the conversation based on SYSTEM, USER, and ASSISTANT \n pattern = r\"(SYSTEM|USER|ASSISTANT|FUNCTION RESPONSE):\" \n \n # Split the input string and keep the delimiters \n parts = re.split(pattern, input_string) \n \n # Initialize the list to store conversation entries \n conversation = [] \n \n # Iterate over the parts, skipping the first empty string \n for i in range(1, len(parts), 2): \n role = parts[i].strip() \n content = parts[i + 1].strip() \n content = content.replace(\"<|endoftext|>\", \"\").strip()\n\n if content.startswith('<functioncall>'): # build structured data for function call\n # try to turn function call from raw text to structured data\n content = content.replace('<functioncall>', '').strip()\n # replace single quotes with double quotes for valid JSON\n clean_content = content.replace(\"'{\", '{').replace(\"'}\", '}')\n data_json = json.loads(clean_content)\n # Make it compatible with openAI prompt format\n func_call = {'recipient_name': f\"functions.{data_json['name']}\", 'parameters': data_json['arguments']}\n content = {'tool_uses': [func_call]}\n \n # Append a dictionary with the role and content to the conversation list \n conversation.append({\"role\": ROLE_MAPPING[role], \"content\": content}) \n \n return conversation \n\ndef prepare_dataset(tokenizer, args):\n \n # Create the cache_dir\n cache_dir = \"./outputs/dataset\"\n os.makedirs(cache_dir, exist_ok = True)\n\n # Load the dataset from disk\n train_dataset = load_from_disk(args.train_dir) \n eval_dataset = load_from_disk(args.val_dir)\n\n column_names = list(train_dataset.features)\n\n def apply_chat_template(examples):\n conversations = []\n for system, chat in zip(examples[\"system\"], examples[\"chat\"]):\n try:\n system_message = parse_conversation(system)\n chat_message 
= parse_conversation(chat)\n message = system_message + chat_message\n conversations.append(message)\n except Exception as e:\n print(e) \n\n text = [tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=False) for message in conversations]\n return {\"text\": text}\n\n # process the dataseta and drop unused columns\n processed_train_dataset = train_dataset.map(apply_chat_template, cache_file_name = f\"{cache_dir}/cache.arrow\", batched = True, remove_columns=column_names)\n processed_eval_dataset = eval_dataset.map(apply_chat_template, cache_file_name = f\"{cache_dir}/cache.arrow\", batched = True, remove_columns=column_names)\n\n return processed_train_dataset, processed_eval_dataset\n\n

4: Create a Data Asset: Azure Machine Learning allows you to register datasets as data assets, making them easily manageable and reusable:

\ndef get_or_create_data_asset(ml_client, data_name, data_local_dir, update=False):\n \n try:\n latest_data_version = max([int(d.version) for d in ml_client.data.list(name=data_name)])\n if update:\n raise ResourceExistsError('Found Data asset, but will update the Data.') \n else:\n data_asset = ml_client.data.get(name=data_name, version=latest_data_version)\n logger.info(f\"Found Data asset: {data_name}. Will not create again\")\n except (ResourceNotFoundError, ResourceExistsError) as e:\n data = Data(\n path=data_local_dir,\n type=AssetTypes.URI_FOLDER,\n description=f\"{data_name} for fine tuning\",\n tags={\"FineTuningType\": \"Instruction\", \"Language\": \"En\"},\n name=data_name\n )\n data_asset = ml_client.data.create_or_update(data)\n logger.info(f\"Created/Updated Data asset: {data_name}\")\n \n return data_asset\n\ntrain_data = get_or_create_data_asset(ml_client, f\"{AZURE_DATA_NAME}_train\", data_local_dir=f\"{DATA_DIR}/train\", update=True)\nval_data = get_or_create_data_asset(ml_client, f\"{AZURE_DATA_NAME}_val\", data_local_dir=f\"{DATA_DIR}/val\", update=True)\ntest_data = get_or_create_data_asset(ml_client, f\"{AZURE_DATA_NAME}_test\", data_local_dir=f\"{DATA_DIR}/test\", update=True)\n

5: Create an Environment: While Azure provides built-in environments for common use cases, creating a custom environment tailored to your specific needs can be beneficial.

\n

An environment in Azure ML is essentially a containerized setup that defines the software, libraries, and other dependencies required to run your machine learning workload.

\n

Why Use Environments?

\n
    \n
  1. Reproducibility: By defining an environment, you ensure that your training and inference processes are reproducible, with the same configuration used every time.
  2. \n
  3. Consistency: Environments help maintain consistency across different runs and teams, reducing \"it works on my machine\" problems.
  4. \n
  5. Portability: They encapsulate your dependencies, making it easier to move and share your ML projects across different Azure services or even with external collaborators.
  6. \n
\n%%writefile {CLOUD_DIR}/train/Dockerfile\n\nFROM mcr.microsoft.com/aifx/acpt/stable-ubuntu2004-cu124-py310-torch241:biweekly.202410.2\n\nUSER root\n\n# support Deepspeed launcher requirement of passwordless ssh login\nRUN apt-get update && apt-get -y upgrade\nRUN pip install --upgrade pip\nRUN apt-get install -y openssh-server openssh-client\n\n# Install pip dependencies\nCOPY requirements.txt .\nRUN pip install -r requirements.txt --no-cache-dir\n\nRUN MAX_JOBS=4 pip install flash-attn==2.6.3 --no-build-isolationdef get_or_create_docker_environment_asset(ml_client, env_name, docker_dir, update=False):\n \n try:\n latest_env_version = max([int(e.version) for e in ml_client.environments.list(name=env_name)])\n if update:\n raise ResourceExistsError('Found Environment asset, but will update the Environment.')\n else:\n env_asset = ml_client.environments.get(name=env_name, version=latest_env_version)\n print(f\"Found Environment asset: {env_name}. Will not create again\")\n except (ResourceNotFoundError, ResourceExistsError) as e:\n print(f\"Exception: {e}\")\n env_docker_image = Environment(\n build=BuildContext(path=docker_dir),\n name=env_name,\n description=\"Environment created from a Docker context.\",\n )\n env_asset = ml_client.environments.create_or_update(env_docker_image)\n print(f\"Created Environment asset: {env_name}\")\n \n return env_asset\n\nenv = get_or_create_docker_environment_asset(ml_client, azure_env_name, docker_dir=f\"{CLOUD_DIR}/train\", update=False)\n

Reference : training.ipynb

\n

6: Create a Training Script: Your training script will handle the fine-tuning process and log metrics using MLflow, which is tightly integrated with Azure Machine Learning. This involves - Loading the dataset, defining the model architecture, writing functions to track and log metrics such as training and evaluation loss.

\ndef main(args):\n\n ###################\n # Hyper-parameters\n ###################\n # Only overwrite environ if wandb param passed\n if len(args.wandb_project) > 0:\n os.environ['WANDB_API_KEY'] = args.wandb_api_key \n os.environ[\"WANDB_PROJECT\"] = args.wandb_project\n if len(args.wandb_watch) > 0:\n os.environ[\"WANDB_WATCH\"] = args.wandb_watch\n if len(args.wandb_log_model) > 0:\n os.environ[\"WANDB_LOG_MODEL\"] = args.wandb_log_model\n \n use_wandb = len(args.wandb_project) > 0 or (\"WANDB_PROJECT\" in os.environ and len(os.environ[\"WANDB_PROJECT\"]) > 0) \n \n training_config = {\"per_device_train_batch_size\" : args.train_batch_size, # Controls the batch size per device\n \"per_device_eval_batch_size\" : args.eval_batch_size, # Controls the batch size for evaluation\n \"gradient_accumulation_steps\" : args.grad_accum_steps,\n \"warmup_ratio\" : args.warmup_ratio, # Controls the ratio of warmup steps\n \"learning_rate\" : args.learning_rate, \n \"fp16\" : not torch.cuda.is_bf16_supported(),\n \"bf16\" : torch.cuda.is_bf16_supported(),\n \"optim\" : \"adamw_8bit\",\n \"lr_scheduler_type\" : args.lr_scheduler_type,\n \"output_dir\" : args.output_dir,\n \"logging_steps\": args.logging_steps,\n \"logging_strategy\": \"epoch\",\n \"save_steps\": args.save_steps,\n \"eval_strategy\": \"epoch\",\n \"num_train_epochs\": args.epochs,\n # \"load_best_model_at_end\": True,\n \"save_only_model\": False,\n \"seed\" : 0\n }\n\n peft_config = {\n \"r\": args.lora_r,\n \"lora_alpha\": args.lora_alpha,\n \"lora_dropout\": args.lora_dropout,\n \"bias\": \"none\",\n #\"target_modules\": \"all-linear\",\n \"target_modules\": [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n \"modules_to_save\": None,\n \"use_gradient_checkpointing\": \"unsloth\",\n \"use_rslora\": False,\n \"loftq_config\": None,\n }\n\n checkpoint_dir = os.path.join(args.output_dir, \"checkpoints\")\n\n train_conf = TrainingArguments(\n **training_config,\n 
report_to=\"wandb\" if use_wandb else \"azure_ml\",\n run_name=args.wandb_run_name if use_wandb else None, \n )\n\n model, tokenizer = load_model(args)\n model = FastLanguageModel.get_peft_model(model, **peft_config)\n\n ###############\n # Setup logging\n ###############\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n log_level = train_conf.get_process_log_level()\n logger.setLevel(log_level)\n\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process a small summary\n logger.warning(\n f\"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}\"\n + f\" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}\"\n )\n logger.info(f\"Training/evaluation parameters {train_conf}\")\n logger.info(f\"PEFT parameters {peft_config}\") \n\n # Load the dataset\n train_dataset, eval_dataset = prepare_dataset(tokenizer, args)\n\n ###########\n # Training\n ###########\n trainer = SFTTrainer(\n model=model,\n args=train_conf,\n tokenizer = tokenizer,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n dataset_text_field=\"text\",\n packing = False # Can make training 5x faster for shorter responses\n )\n\n # Show current memory stats\n gpu_stats = torch.cuda.get_device_properties(0)\n start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n logger.info(f\"GPU = {gpu_stats.name}. 
Max memory = {max_memory} GB.\")\n logger.info(f\"{start_gpu_memory} GB of memory reserved.\")\n\n last_checkpoint = None\n if os.path.isdir(checkpoint_dir):\n checkpoints = [os.path.join(checkpoint_dir, d) for d in os.listdir(checkpoint_dir)]\n if len(checkpoints) > 0:\n checkpoints.sort(key=os.path.getmtime, reverse=True)\n last_checkpoint = checkpoints[0] \n \n trainer_stats = trainer.train(resume_from_checkpoint=last_checkpoint)\n\n #############\n # Evaluation\n #############\n tokenizer.padding_side = \"left\"\n metrics = trainer.evaluate()\n metrics[\"eval_samples\"] = len(eval_dataset)\n trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\"eval\", metrics)\n \n # ############\n # # Save model\n # ############\n os.makedirs(args.model_dir, exist_ok=True)\n\n if args.save_merged_model:\n print(\"Save PEFT model with merged 16-bit weights\")\n model.save_pretrained_merged(\"outputs\", tokenizer, save_method=\"merged_16bit\")\n else:\n print(f\"Save PEFT model: {args.model_dir}/model\") \n model.save_pretrained(f\"{args.model_dir}/model\")\n\n tokenizer.save_pretrained(args.model_dir)\n

Reference : train.py

\n

7: Create the Compute Cluster: For this experiment, we are using Standard_NC24ads_A100_v4, which has 1 GPU and 80 GB of VRAM. Select the compute based on the model size and batch size.

\nfrom azure.ai.ml.entities import AmlCompute\n\n### Create the compute cluster\ntry:\n compute = ml_client.compute.get(azure_compute_cluster_name)\n print(\"The compute cluster already exists! Reusing it for the current run\")\nexcept Exception as ex:\n print(\n f\"Looks like the compute cluster doesn't exist. Creating a new one with compute size {azure_compute_cluster_size}!\"\n )\n try:\n print(\"Attempt #1 - Trying to create a dedicated compute\")\n tier = 'LowPriority' if USE_LOWPRIORITY_VM else 'Dedicated'\n compute = AmlCompute(\n name=azure_compute_cluster_name,\n size=azure_compute_cluster_size,\n tier=tier,\n max_instances=1, # For multi node training set this to an integer value more than 1\n )\n ml_client.compute.begin_create_or_update(compute).wait()\n except Exception as e:\n print(\"Error\")\n

8: Submit the Fine-Tuning Job

\n

With everything set up, you can now submit your fine-tuning job:

\nfrom azure.ai.ml import command\nfrom azure.ai.ml import Input\nfrom azure.ai.ml.entities import ResourceConfiguration\n\njob = command(\n inputs=dict(\n #train_dir=Input(type=\"uri_folder\", path=DATA_DIR), # Get data from local path\n train_dir=Input(path=f\"{AZURE_DATA_NAME}_train@latest\"), # Get data from Data asset\n val_dir = Input(path=f\"{AZURE_DATA_NAME}_val@latest\"),\n epoch=d['train']['epoch'],\n train_batch_size=d['train']['train_batch_size'],\n eval_batch_size=d['train']['eval_batch_size'], \n ),\n code=f\"{CLOUD_DIR}/train\", # local path where the code is stored\n compute=azure_compute_cluster_name,\n command=\"python train_v3.py --train_dir ${{inputs.train_dir}} --val_dir ${{inputs.val_dir}} --train_batch_size ${{inputs.train_batch_size}} --eval_batch_size ${{inputs.eval_batch_size}}\",\n #environment=\"azureml://registries/azureml/environments/acft-hf-nlp-gpu/versions/77\", # Use built-in Environment asset\n environment=f\"{azure_env_name}@latest\",\n distribution={\n \"type\": \"PyTorch\",\n \"process_count_per_instance\": 1, # For multi-gpu training set this to an integer value more than 1\n },\n)\nreturned_job = ml_client.jobs.create_or_update(job)\nml_client.jobs.stream(returned_job.name)\n

9: Monitor Training Metrics: After initiating the job, keep an eye on the output for key metrics like training loss and evaluation loss. Since we've logged the results to MLflow, which is seamlessly integrated with Azure Machine Learning, we can easily review the loss function by navigating to the metrics tab within the jobs section.

\n\n

Key Takeaways:

\n\n

Overall, it looks promising, so let's go ahead and register the model.

\n

10: Register the Model: After fine-tuning, register the model to make it available for deployment:

\nfrom azureml.core import Workspace, Run \nimport os \n \n# Connect to your workspace \nws = Workspace.from_config() \n \nexperiment_name = 'experiment_name'\nrun_id = 'job_name'\n\nrun = Run(ws.experiments[experiment_name], run_id) \n\n# Register the model \nmodel = run.register_model( \n model_name=d[\"serve\"][\"azure_model_name\"], # this is the name the model will be registered under \n model_path=\"outputs\" # this is the path to the model file in the run's outputs \n) \n# Create a local directory to save the outputs \nlocal_folder = './model_v2' \nos.makedirs(local_folder, exist_ok=True) \n \n# Download the entire outputs folder \nrun.download_files(prefix='outputs', output_directory=local_folder) \n

Step 11: Deploy the Model to a Managed Online Endpoint: Managed online endpoints provide a seamless way to deploy models without managing underlying infrastructure. They offer scalability, versioning, and easy rollback compared to deploying on an Azure Kubernetes Service (AKS) cluster.

\n

11a. Build the environment: To deploy the model to a managed online endpoint, first create the environment with the required dependencies and a webserver for inference.

\n%%writefile {CLOUD_DIR}/serve/Dockerfile\n\nFROM mcr.microsoft.com/aifx/acpt/stable-ubuntu2004-cu124-py310-torch241:biweekly.202410.2\n\n# Install pip dependencies\nCOPY requirements.txt .\nRUN pip install -r requirements.txt --no-cache-dir\n\n# Inference requirements\nCOPY --from=mcr.microsoft.com/azureml/o16n-base/python-assets:20230419.v1 /artifacts /var/\n\nRUN /var/requirements/install_system_requirements.sh && \\\n cp /var/configuration/rsyslog.conf /etc/rsyslog.conf && \\\n cp /var/configuration/nginx.conf /etc/nginx/sites-available/app && \\\n ln -sf /etc/nginx/sites-available/app /etc/nginx/sites-enabled/app && \\\n rm -f /etc/nginx/sites-enabled/default\nENV SVDIR=/var/runit\nENV WORKER_TIMEOUT=400\nEXPOSE 5001 8883 8888\n\n# support Deepspeed launcher requirement of passwordless ssh login\nRUN apt-get update\nRUN apt-get install -y openssh-server openssh-client\n\nRUN MAX_JOBS=4 pip install flash-attn==2.6.3 --no-build-isolation\n

Reference : serving.ipynb

\n

11b. Create a serving script: Creating a serve script for inference is a crucial step in deploying your machine learning model to a production environment. This script handles incoming requests, processes input data, runs the model inference, and returns the results. In Azure Machine Learning, the serve script is part of the deployment package for your model, typically used in conjunction with a managed endpoint or a Kubernetes service.

\n

A serve script in Azure ML typically consists of two main functions:

\n
    \n
  1. init(): This function initializes the model and any other necessary resources. It is called once when the deployment is first loaded.
  2. \n
  3. run(data): This function is called every time a request is made to the deployed model. It processes the incoming data, performs inference using the model, and returns the results.
  4. \n
\nimport os\nimport re\nimport json\nimport torch\nimport base64\nimport logging\n\nfrom io import BytesIO\nfrom transformers import AutoTokenizer, AutoProcessor, pipeline\nfrom transformers import AutoModelForCausalLM, AutoProcessor\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef init():\n \"\"\"\n This function is called when the container is initialized/started, typically after create/update of the deployment.\n You can write the logic here to perform init operations like caching the model in memory\n \"\"\"\n global model\n global tokenizer\n # AZUREML_MODEL_DIR is an environment variable created during deployment.\n # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)\n # Please provide your model's folder name if there is one\n model_name_or_path = os.path.join(\n os.getenv(\"AZUREML_MODEL_DIR\"), \"outputs\"\n )\n \n model_kwargs = dict(\n trust_remote_code=True, \n device_map={\"\":0},\n torch_dtype=\"auto\" \n )\n \n model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map ={\"\" : 0}, **model_kwargs)\n tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) \n\n logging.info(\"Loaded model.\")\n \ndef run(json_data: str):\n logging.info(\"Request received\")\n data = json.loads(json_data)\n input_data = data[\"input_data\"]\n params = data['params']\n\n pipe = pipeline(\"text-generation\", model = model, tokenizer = tokenizer)\n output = pipe(input_data, **params)\n result = output[0][\"generated_text\"]\n logging.info(f\"Generated text : {result}\")\n json_result = {\"result\" : str(result)}\n\n return json_result\n

Reference : score.py

\n

11c. Create a managed online endpoint and deploy the model to endpoint: Creating an endpoint and deploying your model on Azure Machine Learning is the final step to make your model accessible for real-time inference. This process involves setting up a service that can handle incoming requests, execute the model, and return the results.

\n

Why Create an Endpoint?

\n

An endpoint is a network-accessible interface that allows external applications or users to interact with your deployed machine learning model. Creating an endpoint is crucial for the following reasons:

\n
    \n
  1. Accessibility: Endpoints make your model accessible over the internet or within a secured network, enabling other applications, services, or users to send requests and receive responses.
  2. \n
  3. API Integration: By exposing your model as a RESTful API, endpoints facilitate integration with various applications, allowing seamless communication and data exchange.
  4. \n
  5. Load Management: An endpoint can manage requests from multiple clients, handling concurrent requests and distributing the load appropriately.
  6. \n
  7. Security: Endpoints provide mechanisms for authentication and authorization, ensuring that only authorized users can access the model.
  8. \n
  9. Scalability: Azure-managed endpoints can automatically scale based on demand, ensuring that your model can handle varying workloads without manual intervention.
  10. \n
from azure.ai.ml.entities import (
    ManagedOnlineEndpoint,
    IdentityConfiguration,
    ManagedIdentityConfiguration,
)

azure_endpoint_name = d['serve']['azure_endpoint_name']

# Reuse the endpoint when it already exists in the workspace; otherwise
# define a new one. Only Exception is caught — a bare "except:" would also
# swallow KeyboardInterrupt/SystemExit.
try:
    endpoint = ml_client.online_endpoints.get(azure_endpoint_name)
    print("---Endpoint already exists---")
except Exception:
    # Define the endpoint (creation is triggered below).
    endpoint = ManagedOnlineEndpoint(
        name=azure_endpoint_name,
        description=f"Test endpoint for {model.name}",
    )

# Trigger the endpoint create-or-update and block until it completes.
try:
    ml_client.begin_create_or_update(endpoint).wait()
    print("\n---Endpoint created successfully---\n")
except Exception as err:
    raise RuntimeError(
        f"Endpoint creation failed. Detailed Response:\n{err}"
    ) from err

Why Deploy a Model?

\n

Deployment is the process of transferring your trained machine learning model from a development environment to a production environment where it can serve real-time predictions. Deployment is critical because:

\n
    \n
  1. Operationalization: Deployment operationalizes your model, moving it from an experimental or development phase to a live environment where it can deliver value to end-users or systems.
  2. \n
  3. Resource Allocation: Deploying a model involves configuring the necessary compute resources (such as CPU, memory, and GPUs) to ensure optimal performance during inference.
  4. \n
  5. Environment Consistency: During deployment, the model is packaged with its dependencies in a consistent environment, ensuring reproducibility and minimizing discrepancies between development and production.
  6. \n
  7. Monitoring and Maintenance: Deployment sets up the infrastructure to monitor the model's performance, usage, and health, allowing for ongoing maintenance and updates.
  8. \n
  9. Version Control: Deployment allows you to manage and update different versions of your model, providing flexibility to roll back or switch to newer versions as needed.\n\nfrom azure.ai.ml.entities import ( \n OnlineRequestSettings,\n CodeConfiguration,\n ManagedOnlineDeployment,\n ProbeSettings,\n Environment\n)\n\nazure_deployment_name = f"{d['serve']['azure_deployment_name']}-v1"\n\ndeployment = ManagedOnlineDeployment(\n name=azure_deployment_name,\n endpoint_name=azure_endpoint_name,\n model=model,\n instance_type=azure_compute_cluster_size,\n instance_count=1,\n #code_configuration=code_configuration,\n environment = env,\n scoring_script="score.py",\n code_path=f"./{CLOUD_DIR}/inference",\n #environment_variables=deployment_env_vars,\n request_settings=OnlineRequestSettings(max_concurrent_requests_per_instance=20,\n request_timeout_ms=90000, max_queue_wait_ms=60000),\n liveness_probe=ProbeSettings(\n failure_threshold=30,\n success_threshold=1,\n period=100,\n initial_delay=500,\n ),\n readiness_probe=ProbeSettings(\n failure_threshold=30,\n success_threshold=1,\n period=100,\n initial_delay=500,\n ),\n)\n\n# Trigger the deployment creation\ntry:\n ml_client.begin_create_or_update(deployment).wait()\n print("\\n---Deployment created successfully---\\n")\nexcept Exception as err:\n raise RuntimeError(\n f"Deployment creation failed. Detailed Response:\\n{err}"\n ) from err\n \nendpoint.traffic = {azure_deployment_name: 100}\nendpoint_poller = ml_client.online_endpoints.begin_create_or_update(endpoint)
  10. \n
\n

Step 12: Run Inference on Sample Data: Test the deployed model using sample data that expects function calls:

import json
import os

# Chat transcript that should trigger a function call: the system message
# advertises a single calculate_shipping_cost function, and the user has
# supplied both required arguments (weight and destination).
# NOTE(review): request_file is assumed to be defined in an earlier cell.
sample = {
    "input_data": [
        {'role': 'system', 'content': 'You are an helpful assistant who has access to the following functions to help the user, you can use the functions if needed- { "name": "calculate_shipping_cost", "description": "Calculate the cost of shipping a package", "parameters": { "type": "object", "properties": { "weight": { "type": "number", "description": "The weight of the package in pounds" }, "destination": { "type": "string", "description": "The destination of the package" } }, "required": [ "weight", "destination" ] }}}"'},
        {'role': 'user', 'content': 'Can you help me with shipping cost for a package?'},
        {'role': 'assistant', 'content': 'Sure! I can help you with that. Please provide me with the weight and destination of the package.'},
        {'role': 'user', 'content': 'The weight of the package is 10 pounds and the destination is New York.'},
    ],
    "params": {
        "temperature": 0.1,
        "max_new_tokens": 512,
        "do_sample": True,
        "return_full_text": False,
    },
}

# Persist the request payload to disk so it can be posted to the endpoint.
with open(request_file, "w") as f:
    json.dump(sample, f)

# Invoke the deployed model and unwrap the scoring script's JSON envelope.
raw_response = ml_client.online_endpoints.invoke(
    endpoint_name=azure_endpoint_name,
    deployment_name=azure_deployment_name,
    request_file=request_file,
)
result = json.loads(raw_response)['result']

print(result)

Step 13: Compare with Base Model: Now, let's run the same sample through the base model to observe the difference in performance. As we can see, while the fine-tuned model did a perfect job of generating a response with the right function and arguments, the base model struggles to generate the desired output.

\n

Step 14: Rerun the fine-tuning job by removing function descriptions from the system message: Now, let's rerun the experiment, but this time we will drop the function descriptions from the dataset for context-length optimization.

def remove_desc_from_prompts(data):
    """
    Blank out every "description" value inside the record's system message.

    The function/argument descriptions consume many input tokens; the
    experiment drops them for context-length optimization while keeping the
    rest of the function schema intact.

    Args:
        data: a dataset record (dict-like) with a 'system' string field.

    Returns:
        str: the system message with each description value emptied.
    """
    system_message = data['system']
    # Match only the quoted value. The old pattern also consumed an optional
    # trailing comma/newline but always re-inserted a comma, which injected
    # a spurious "," when "description" was the last field before a closing
    # brace. Leaving the surrounding punctuation untouched keeps the
    # JSON-like schema text well-formed in every position.
    pattern = r'"description":\s*"[^"]*"'

    # Remove the "description" fields
    return re.sub(pattern, '"description": ""', system_message)


## Update the system message by removing function descriptions and argument descriptions
train_dataset = train_dataset.map(lambda x: {"updated_system": remove_desc_from_prompts(x)}, remove_columns=["system"])
test_dataset = test_dataset.map(lambda x: {"updated_system": remove_desc_from_prompts(x)}, remove_columns=["system"])
val_dataset = val_dataset.map(lambda x: {"updated_system": remove_desc_from_prompts(x)}, remove_columns=["system"])

train_dataset.save_to_disk(f"{DATA_DIR}/train")
test_dataset.save_to_disk(f"{DATA_DIR}/test")
val_dataset.save_to_disk(f"{DATA_DIR}/val")

Reference : preprocess.py

\n

As can be seen from the results, removing the function descriptions doesn't degrade model performance; instead, this fine-tuned model version requires fewer input tokens, resulting in a significant reduction in token consumption and improved latency.

\n\n

Step 15: Further Exploration: Consider removing arguments or even the function itself in subsequent experiments to evaluate performance.

\n

Conclusion

\n

This blog post has walked through the process of fine-tuning an SLM for function-calling on Azure Machine Learning. By following these steps, you can effectively tailor a model to meet specific functional requirements.

\n

You can access the full code here.

\n

For a deeper dive into evaluating fine-tuned models, including metrics and code samples, check out the next blog post. By leveraging Azure's powerful tools, you can streamline the development and deployment of machine learning models, making them more efficient and effective for your specific tasks.

\n

Reference:

\n

Fine tuning for function calling | OpenAI Cookbook

\n

Fine-tuning function calls with Azure OpenAI Service - Azure AI services | Microsoft Learn

\n

michaelnny/Llama3-FunctionCalling: Fine-tune Llama3 model to support function calling

\n

Fine Tuning LLMs for Function Calling w/Pawel Garbacki - YouTube

\n

slm-innovator-lab/2_slm-fine-tuning-mlstudio at main · Azure/slm-innovator-lab

","kudosSumWeight":0,"postTime":"2025-01-08T05:10:23.478-08:00","images":{"__typename":"AssociatedImageConnection","edges":[{"__typename":"AssociatedImageEdge","cursor":"MjQuMTF8Mi4xfG98MjV8X05WX3wx","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MzYyNTM5LVZtMGoyQw?revision=4\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjQuMTF8Mi4xfG98MjV8X05WX3wy","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MzYyNTM5LTZqalZzUA?revision=4\"}"}}],"totalCount":2,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"attachments":{"__typename":"AttachmentConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"tags":{"__typename":"TagConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[{"__typename":"TagEdge","cursor":"MjQuMTF8Mi4xfG98MTB8X05WX3wx","node":{"__typename":"Tag","id":"tag:artificial intelligence","text":"artificial intelligence","time":"2018-02-28T01:21:24.829-08:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjQuMTF8Mi4xfG98MTB8X05WX3wy","node":{"__typename":"Tag","id":"tag:azure ai studio","text":"azure ai studio","time":"2023-11-11T00:57:52.231-08:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjQuMTF8Mi4xfG98MTB8X05WX3wz","node":{"__typename":"Tag","id":"tag:azure machine learning","text":"azure machine learning","time":"2016-09-06T11:34:30.244-07:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjQuMTF8Mi4xfG98MTB8X05WX3w0","node":{"__typename":"Tag","id":"tag:natural language processing","text":"natural language 
processing","time":"2022-06-10T14:23:41.201-07:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}}]},"timeToRead":21,"rawTeaser":"","introduction":"","coverImage":null,"coverImageProperties":{"__typename":"CoverImageProperties","style":"STANDARD","titlePosition":"BOTTOM","altText":""},"currentRevision":{"__ref":"Revision:revision:4362539_4"},"latestVersion":{"__typename":"FriendlyVersion","major":"2","minor":"0"},"metrics":{"__typename":"MessageMetrics","views":385},"visibilityScope":"PUBLIC","canonicalUrl":"","seoTitle":"","seoDescription":null,"placeholder":false,"originalMessageForPlaceholder":null,"contributors":{"__typename":"UserConnection","edges":[]},"nonCoAuthorContributors":{"__typename":"UserConnection","edges":[]},"coAuthors":{"__typename":"UserConnection","edges":[]},"blogMessagePolicies":{"__typename":"BlogMessagePolicies","canDoAuthoringActionsOnBlog":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.blog.action_can_do_authoring_action.accessDenied","key":"error.lithium.policies.blog.action_can_do_authoring_action.accessDenied","args":[]}}},"archivalData":null,"replies":{"__typename":"MessageConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"customFields":[],"revisions({\"constraints\":{\"isPublished\":{\"eq\":true}},\"first\":1})":{"__typename":"RevisionConnection","totalCount":4}},"Conversation:conversation:4362539":{"__typename":"Conversation","id":"conversation:4362539","solved":false,"topic":{"__ref":"BlogTopicMessage:message:4362539"},"lastPostingActivityTime":"2025-01-08T07:29:33.988-08:00","lastPostTime":"2025-01-08T05:10:23.478-08:00","unreadReplyCount":0,"isSubscribed":false},"ModerationData:moderation_data:4362539":{"__typename":"ModerationData","id":"moderation_data:4362539","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":nul
l,"rejectActorType":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MzYyNTM5LVZtMGoyQw?revision=4\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MzYyNTM5LVZtMGoyQw?revision=4","title":"image.png","associationType":"BODY","width":1615,"height":562,"altText":""},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MzYyNTM5LTZqalZzUA?revision=4\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS00MzYyNTM5LTZqalZzUA?revision=4","title":"image.png","associationType":"BODY","width":1609,"height":538,"altText":""},"Revision:revision:4362539_4":{"__typename":"Revision","id":"revision:4362539_4","lastEditTime":"2025-01-08T07:29:33.988-08:00"},"CachedAsset:theme:customTheme1-1737128933984":{"__typename":"CachedAsset","id":"theme:customTheme1-1737128933984","value":{"id":"customTheme1","animation":{"fast":"150ms","normal":"250ms","slow":"500ms","slowest":"750ms","function":"cubic-bezier(0.07, 0.91, 0.51, 
1)","__typename":"AnimationThemeSettings"},"avatar":{"borderRadius":"50%","collections":["default"],"__typename":"AvatarThemeSettings"},"basics":{"browserIcon":{"imageAssetName":"favicon-1730836283320.png","imageLastModified":"1730836286415","__typename":"ThemeAsset"},"customerLogo":{"imageAssetName":"favicon-1730836271365.png","imageLastModified":"1730836274203","__typename":"ThemeAsset"},"maximumWidthOfPageContent":"1300px","oneColumnNarrowWidth":"800px","gridGutterWidthMd":"30px","gridGutterWidthXs":"10px","pageWidthStyle":"WIDTH_OF_BROWSER","__typename":"BasicsThemeSettings"},"buttons":{"borderRadiusSm":"3px","borderRadius":"3px","borderRadiusLg":"5px","paddingY":"5px","paddingYLg":"7px","paddingYHero":"var(--lia-bs-btn-padding-y-lg)","paddingX":"12px","paddingXLg":"16px","paddingXHero":"60px","fontStyle":"NORMAL","fontWeight":"700","textTransform":"NONE","disabledOpacity":0.5,"primaryTextColor":"var(--lia-bs-white)","primaryTextHoverColor":"var(--lia-bs-white)","primaryTextActiveColor":"var(--lia-bs-white)","primaryBgColor":"var(--lia-bs-primary)","primaryBgHoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.85))","primaryBgActiveColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.7))","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid transparent","primaryBorderActive":"1px solid transparent","primaryBorderFocus":"1px solid var(--lia-bs-white)","primaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","secondaryTextColor":"var(--lia-bs-gray-900)","secondaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","secondaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 
0.9))","secondaryBgColor":"var(--lia-bs-gray-200)","secondaryBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.96))","secondaryBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","secondaryBorder":"1px solid transparent","secondaryBorderHover":"1px solid transparent","secondaryBorderActive":"1px solid transparent","secondaryBorderFocus":"1px solid transparent","secondaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","tertiaryTextColor":"var(--lia-bs-gray-900)","tertiaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","tertiaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.9))","tertiaryBgColor":"transparent","tertiaryBgHoverColor":"transparent","tertiaryBgActiveColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.04)","tertiaryBorder":"1px solid transparent","tertiaryBorderHover":"1px solid hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","tertiaryBorderActive":"1px solid transparent","tertiaryBorderFocus":"1px solid transparent","tertiaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","destructiveTextColor":"var(--lia-bs-danger)","destructiveTextHoverColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), calc(var(--lia-bs-danger-l) * 0.95))","destructiveTextActiveColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), calc(var(--lia-bs-danger-l) * 0.9))","destructiveBgColor":"var(--lia-bs-gray-200)","destructiveBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 
0.96))","destructiveBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","destructiveBorder":"1px solid transparent","destructiveBorderHover":"1px solid transparent","destructiveBorderActive":"1px solid transparent","destructiveBorderFocus":"1px solid transparent","destructiveBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","__typename":"ButtonsThemeSettings"},"border":{"color":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","mainContent":"NONE","sideContent":"LIGHT","radiusSm":"3px","radius":"5px","radiusLg":"9px","radius50":"100vw","__typename":"BorderThemeSettings"},"boxShadow":{"xs":"0 0 0 1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.08), 0 3px 0 -1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.16)","sm":"0 2px 4px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.12)","md":"0 5px 15px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.3)","lg":"0 10px 30px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 
0.3)","__typename":"BoxShadowThemeSettings"},"cards":{"bgColor":"var(--lia-panel-bg-color)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":"var(--lia-box-shadow-xs)","__typename":"CardsThemeSettings"},"chip":{"maxWidth":"300px","height":"30px","__typename":"ChipThemeSettings"},"coreTypes":{"defaultMessageLinkColor":"var(--lia-bs-link-color)","defaultMessageLinkDecoration":"none","defaultMessageLinkFontStyle":"NORMAL","defaultMessageLinkFontWeight":"400","defaultMessageFontStyle":"NORMAL","defaultMessageFontWeight":"400","forumColor":"#4099E2","forumFontFamily":"var(--lia-bs-font-family-base)","forumFontWeight":"var(--lia-default-message-font-weight)","forumLineHeight":"var(--lia-bs-line-height-base)","forumFontStyle":"var(--lia-default-message-font-style)","forumMessageLinkColor":"var(--lia-default-message-link-color)","forumMessageLinkDecoration":"var(--lia-default-message-link-decoration)","forumMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","forumMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","forumSolvedColor":"#148563","blogColor":"#1CBAA0","blogFontFamily":"var(--lia-bs-font-family-base)","blogFontWeight":"var(--lia-default-message-font-weight)","blogLineHeight":"1.75","blogFontStyle":"var(--lia-default-message-font-style)","blogMessageLinkColor":"var(--lia-default-message-link-color)","blogMessageLinkDecoration":"var(--lia-default-message-link-decoration)","blogMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","blogMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","tkbColor":"#4C6B90","tkbFontFamily":"var(--lia-bs-font-family-base)","tkbFontWeight":"var(--lia-default-message-font-weight)","tkbLineHeight":"1.75","tkbFontStyle":"var(--lia-default-message-font-style)","tkbMessageLinkColor":"var(--lia-default-message-link-color)","tkbMessageLinkDecoration":"var(--lia-default-message-link-decoration)","tkbMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","tk
bMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaColor":"#4099E2","qandaFontFamily":"var(--lia-bs-font-family-base)","qandaFontWeight":"var(--lia-default-message-font-weight)","qandaLineHeight":"var(--lia-bs-line-height-base)","qandaFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkColor":"var(--lia-default-message-link-color)","qandaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","qandaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaSolvedColor":"#3FA023","ideaColor":"#FF8000","ideaFontFamily":"var(--lia-bs-font-family-base)","ideaFontWeight":"var(--lia-default-message-font-weight)","ideaLineHeight":"var(--lia-bs-line-height-base)","ideaFontStyle":"var(--lia-default-message-font-style)","ideaMessageLinkColor":"var(--lia-default-message-link-color)","ideaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","ideaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","ideaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","contestColor":"#FCC845","contestFontFamily":"var(--lia-bs-font-family-base)","contestFontWeight":"var(--lia-default-message-font-weight)","contestLineHeight":"var(--lia-bs-line-height-base)","contestFontStyle":"var(--lia-default-message-link-font-style)","contestMessageLinkColor":"var(--lia-default-message-link-color)","contestMessageLinkDecoration":"var(--lia-default-message-link-decoration)","contestMessageLinkFontStyle":"ITALIC","contestMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","occasionColor":"#D13A1F","occasionFontFamily":"var(--lia-bs-font-family-base)","occasionFontWeight":"var(--lia-default-message-font-weight)","occasionLineHeight":"var(--lia-bs-line-height-base)","occasionFontStyle":"var(--lia-default-message-font-style)","occasionMessageLinkColor":"var(--lia-default-message-link-color)","occasionMessa
geLinkDecoration":"var(--lia-default-message-link-decoration)","occasionMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","occasionMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","grouphubColor":"#333333","categoryColor":"#949494","communityColor":"#FFFFFF","productColor":"#949494","__typename":"CoreTypesThemeSettings"},"colors":{"black":"#000000","white":"#FFFFFF","gray100":"#F7F7F7","gray200":"#F7F7F7","gray300":"#E8E8E8","gray400":"#D9D9D9","gray500":"#CCCCCC","gray600":"#717171","gray700":"#707070","gray800":"#545454","gray900":"#333333","dark":"#545454","light":"#F7F7F7","primary":"#0069D4","secondary":"#333333","bodyText":"#333333","bodyBg":"#FFFFFF","info":"#409AE2","success":"#41C5AE","warning":"#FCC844","danger":"#BC341B","alertSystem":"#FF6600","textMuted":"#707070","highlight":"#FFFCAD","outline":"var(--lia-bs-primary)","custom":["#D3F5A4","#243A5E"],"__typename":"ColorsThemeSettings"},"divider":{"size":"3px","marginLeft":"4px","marginRight":"4px","borderRadius":"50%","bgColor":"var(--lia-bs-gray-600)","bgColorActive":"var(--lia-bs-gray-600)","__typename":"DividerThemeSettings"},"dropdown":{"fontSize":"var(--lia-bs-font-size-sm)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius-sm)","dividerBg":"var(--lia-bs-gray-300)","itemPaddingY":"5px","itemPaddingX":"20px","headerColor":"var(--lia-bs-gray-700)","__typename":"DropdownThemeSettings"},"email":{"link":{"color":"#0069D4","hoverColor":"#0061c2","decoration":"none","hoverDecoration":"underline","__typename":"EmailLinkSettings"},"border":{"color":"#e4e4e4","__typename":"EmailBorderSettings"},"buttons":{"borderRadiusLg":"5px","paddingXLg":"16px","paddingYLg":"7px","fontWeight":"700","primaryTextColor":"#ffffff","primaryTextHoverColor":"#ffffff","primaryBgColor":"#0069D4","primaryBgHoverColor":"#005cb8","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid 
transparent","__typename":"EmailButtonsSettings"},"panel":{"borderRadius":"5px","borderColor":"#e4e4e4","__typename":"EmailPanelSettings"},"__typename":"EmailThemeSettings"},"emoji":{"skinToneDefault":"#ffcd43","skinToneLight":"#fae3c5","skinToneMediumLight":"#e2cfa5","skinToneMedium":"#daa478","skinToneMediumDark":"#a78058","skinToneDark":"#5e4d43","__typename":"EmojiThemeSettings"},"heading":{"color":"var(--lia-bs-body-color)","fontFamily":"Segoe UI","fontStyle":"NORMAL","fontWeight":"400","h1FontSize":"34px","h2FontSize":"32px","h3FontSize":"28px","h4FontSize":"24px","h5FontSize":"20px","h6FontSize":"16px","lineHeight":"1.3","subHeaderFontSize":"11px","subHeaderFontWeight":"500","h1LetterSpacing":"normal","h2LetterSpacing":"normal","h3LetterSpacing":"normal","h4LetterSpacing":"normal","h5LetterSpacing":"normal","h6LetterSpacing":"normal","subHeaderLetterSpacing":"2px","h1FontWeight":"var(--lia-bs-headings-font-weight)","h2FontWeight":"var(--lia-bs-headings-font-weight)","h3FontWeight":"var(--lia-bs-headings-font-weight)","h4FontWeight":"var(--lia-bs-headings-font-weight)","h5FontWeight":"var(--lia-bs-headings-font-weight)","h6FontWeight":"var(--lia-bs-headings-font-weight)","__typename":"HeadingThemeSettings"},"icons":{"size10":"10px","size12":"12px","size14":"14px","size16":"16px","size20":"20px","size24":"24px","size30":"30px","size40":"40px","size50":"50px","size60":"60px","size80":"80px","size120":"120px","size160":"160px","__typename":"IconsThemeSettings"},"imagePreview":{"bgColor":"var(--lia-bs-gray-900)","titleColor":"var(--lia-bs-white)","controlColor":"var(--lia-bs-white)","controlBgColor":"var(--lia-bs-gray-800)","__typename":"ImagePreviewThemeSettings"},"input":{"borderColor":"var(--lia-bs-gray-600)","disabledColor":"var(--lia-bs-gray-600)","focusBorderColor":"var(--lia-bs-primary)","labelMarginBottom":"10px","btnFontSize":"var(--lia-bs-font-size-sm)","focusBoxShadow":"0 0 0 3px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), 
var(--lia-bs-primary-l), 0.2)","checkLabelMarginBottom":"2px","checkboxBorderRadius":"3px","borderRadiusSm":"var(--lia-bs-border-radius-sm)","borderRadius":"var(--lia-bs-border-radius)","borderRadiusLg":"var(--lia-bs-border-radius-lg)","formTextMarginTop":"4px","textAreaBorderRadius":"var(--lia-bs-border-radius)","activeFillColor":"var(--lia-bs-primary)","__typename":"InputThemeSettings"},"loading":{"dotDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.2)","dotLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.5)","barDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.06)","barLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.4)","__typename":"LoadingThemeSettings"},"link":{"color":"var(--lia-bs-primary)","hoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) - 10%))","decoration":"none","hoverDecoration":"underline","__typename":"LinkThemeSettings"},"listGroup":{"itemPaddingY":"15px","itemPaddingX":"15px","borderColor":"var(--lia-bs-gray-300)","__typename":"ListGroupThemeSettings"},"modal":{"contentTextColor":"var(--lia-bs-body-color)","contentBg":"var(--lia-bs-white)","backgroundBg":"var(--lia-bs-black)","smSize":"440px","mdSize":"760px","lgSize":"1080px","backdropOpacity":0.3,"contentBoxShadowXs":"var(--lia-bs-box-shadow-sm)","contentBoxShadow":"var(--lia-bs-box-shadow)","headerFontWeight":"700","__typename":"ModalThemeSettings"},"navbar":{"position":"FIXED","background":{"attachment":null,"clip":null,"color":"var(--lia-bs-white)","imageAssetName":"","imageLastModified":"0","origin":null,"position":"CENTER_CENTER","repeat":"NO_REPEAT","size":"COVER","__typename":"BackgroundProps"},"backgroundOpacity":0.8,"paddingTop":"15px","paddingBottom":"15px","borderBottom":"1px solid 
var(--lia-bs-border-color)","boxShadow":"var(--lia-bs-box-shadow-sm)","brandMarginRight":"30px","brandMarginRightSm":"10px","brandLogoHeight":"30px","linkGap":"10px","linkJustifyContent":"flex-start","linkPaddingY":"5px","linkPaddingX":"10px","linkDropdownPaddingY":"9px","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkColor":"var(--lia-bs-body-color)","linkHoverColor":"var(--lia-bs-primary)","linkFontSize":"var(--lia-bs-font-size-sm)","linkFontStyle":"NORMAL","linkFontWeight":"400","linkTextTransform":"NONE","linkLetterSpacing":"normal","linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkBgColor":"transparent","linkBgHoverColor":"transparent","linkBorder":"none","linkBorderHover":"none","linkBoxShadow":"none","linkBoxShadowHover":"none","linkTextBorderBottom":"none","linkTextBorderBottomHover":"none","dropdownPaddingTop":"10px","dropdownPaddingBottom":"15px","dropdownPaddingX":"10px","dropdownMenuOffset":"2px","dropdownDividerMarginTop":"10px","dropdownDividerMarginBottom":"10px","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.1)","controllerIconColor":"var(--lia-bs-body-color)","controllerIconHoverColor":"var(--lia-bs-body-color)","controllerTextColor":"var(--lia-nav-controller-icon-color)","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","controllerHighlightColor":"hsla(30, 100%, 
50%)","controllerHighlightTextColor":"var(--lia-yiq-light)","controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerColor":"var(--lia-nav-controller-icon-color)","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","hamburgerBgColor":"transparent","hamburgerBgHoverColor":"transparent","hamburgerBorder":"none","hamburgerBorderHover":"none","collapseMenuMarginLeft":"20px","collapseMenuDividerBg":"var(--lia-nav-link-color)","collapseMenuDividerOpacity":0.16,"__typename":"NavbarThemeSettings"},"pager":{"textColor":"var(--lia-bs-link-color)","textFontWeight":"var(--lia-font-weight-md)","textFontSize":"var(--lia-bs-font-size-sm)","__typename":"PagerThemeSettings"},"panel":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-bs-border-radius)","borderColor":"var(--lia-bs-border-color)","boxShadow":"none","__typename":"PanelThemeSettings"},"popover":{"arrowHeight":"8px","arrowWidth":"16px","maxWidth":"300px","minWidth":"100px","headerBg":"var(--lia-bs-white)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius)","boxShadow":"0 0.5rem 1rem hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.15)","__typename":"PopoverThemeSettings"},"prism":{"color":"#000000","bgColor":"#f5f2f0","fontFamily":"var(--font-family-monospace)","fontSize":"var(--lia-bs-font-size-base)","fontWeightBold":"var(--lia-bs-font-weight-bold)","fontStyleItalic":"italic","tabSize":2,"highlightColor":"#b3d4fc","commentColor":"#62707e","punctuationColor":"#6f6f6f","namespaceOpacity":"0.7","propColor":"#990055","selectorColor":"#517a00","operatorColor":"#906736","operatorBgColor":"hsla(0, 0%, 100%, 0.5)","keywordColor":"#0076a9","functionColor":"#d3284b","variableColor":"#c14700","__typename":"PrismThemeSettings"},"rte":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":" 
var(--lia-panel-box-shadow)","customColor1":"#bfedd2","customColor2":"#fbeeb8","customColor3":"#f8cac6","customColor4":"#eccafa","customColor5":"#c2e0f4","customColor6":"#2dc26b","customColor7":"#f1c40f","customColor8":"#e03e2d","customColor9":"#b96ad9","customColor10":"#3598db","customColor11":"#169179","customColor12":"#e67e23","customColor13":"#ba372a","customColor14":"#843fa1","customColor15":"#236fa1","customColor16":"#ecf0f1","customColor17":"#ced4d9","customColor18":"#95a5a6","customColor19":"#7e8c8d","customColor20":"#34495e","customColor21":"#000000","customColor22":"#ffffff","defaultMessageHeaderMarginTop":"40px","defaultMessageHeaderMarginBottom":"20px","defaultMessageItemMarginTop":"0","defaultMessageItemMarginBottom":"10px","diffAddedColor":"hsla(170, 53%, 51%, 0.4)","diffChangedColor":"hsla(43, 97%, 63%, 0.4)","diffNoneColor":"hsla(0, 0%, 80%, 0.4)","diffRemovedColor":"hsla(9, 74%, 47%, 0.4)","specialMessageHeaderMarginTop":"40px","specialMessageHeaderMarginBottom":"20px","specialMessageItemMarginTop":"0","specialMessageItemMarginBottom":"10px","__typename":"RteThemeSettings"},"tags":{"bgColor":"var(--lia-bs-gray-200)","bgHoverColor":"var(--lia-bs-gray-400)","borderRadius":"var(--lia-bs-border-radius-sm)","color":"var(--lia-bs-body-color)","hoverColor":"var(--lia-bs-body-color)","fontWeight":"var(--lia-font-weight-md)","fontSize":"var(--lia-font-size-xxs)","textTransform":"UPPERCASE","letterSpacing":"0.5px","__typename":"TagsThemeSettings"},"toasts":{"borderRadius":"var(--lia-bs-border-radius)","paddingX":"12px","__typename":"ToastsThemeSettings"},"typography":{"fontFamilyBase":"Segoe 
UI","fontStyleBase":"NORMAL","fontWeightBase":"400","fontWeightLight":"300","fontWeightNormal":"400","fontWeightMd":"500","fontWeightBold":"700","letterSpacingSm":"normal","letterSpacingXs":"normal","lineHeightBase":"1.5","fontSizeBase":"16px","fontSizeXxs":"11px","fontSizeXs":"12px","fontSizeSm":"14px","fontSizeLg":"20px","fontSizeXl":"24px","smallFontSize":"14px","customFonts":[{"source":"SERVER","name":"Segoe UI","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"},{"style":"NORMAL","weight":"300","__typename":"FontStyleData"},{"style":"NORMAL","weight":"600","__typename":"FontStyleData"},{"style":"NORMAL","weight":"700","__typename":"FontStyleData"},{"style":"ITALIC","weight":"400","__typename":"FontStyleData"}],"assetNames":["SegoeUI-normal-400.woff2","SegoeUI-normal-300.woff2","SegoeUI-normal-600.woff2","SegoeUI-normal-700.woff2","SegoeUI-italic-400.woff2"],"__typename":"CustomFont"},{"source":"SERVER","name":"MWF Fluent Icons","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"}],"assetNames":["MWFFluentIcons-normal-400.woff2"],"__typename":"CustomFont"}],"__typename":"TypographyThemeSettings"},"unstyledListItem":{"marginBottomSm":"5px","marginBottomMd":"10px","marginBottomLg":"15px","marginBottomXl":"20px","marginBottomXxl":"25px","__typename":"UnstyledListItemThemeSettings"},"yiq":{"light":"#ffffff","dark":"#000000","__typename":"YiqThemeSettings"},"colorLightness":{"primaryDark":0.36,"primaryLight":0.74,"primaryLighter":0.89,"primaryLightest":0.95,"infoDark":0.39,"infoLight":0.72,"infoLighter":0.85,"infoLightest":0.93,"successDark":0.24,"successLight":0.62,"successLighter":0.8,"successLightest":0.91,"warningDark":0.39,"warningLight":0.68,"warningLighter":0.84,"warningLightest":0.93,"dangerDark":0.41,"dangerLight":0.72,"dangerLighter":0.89,"dangerLightest":0.95,"__typename":"ColorLightnessThemeSettings"},"localOverride":false,"__typename":"Theme"},"localOverride":false},"CachedAsset:text:en_US-components/common/EmailV
erification-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/common/EmailVerification-1737128950293","value":{"email.verification.title":"Email Verification Required","email.verification.message.update.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. To change your email, visit My Settings.","email.verification.message.resend.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. Resend email."},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-1737128950293","value":{"title":"Loading..."},"localOverride":false},"CachedAsset:quilt:o365.prod:pages/blogs/BlogMessagePage:board:MachineLearningBlog-1737128946209":{"__typename":"CachedAsset","id":"quilt:o365.prod:pages/blogs/BlogMessagePage:board:MachineLearningBlog-1737128946209","value":{"id":"BlogMessagePage","container":{"id":"Common","headerProps":{"backgroundImageProps":null,"backgroundColor":null,"addComponents":null,"removeComponents":["community.widget.bannerWidget"],"componentOrder":null,"__typename":"QuiltContainerSectionProps"},"headerComponentProps":{"community.widget.breadcrumbWidget":{"disableLastCrumbForDesktop":false}},"footerProps":null,"footerComponentProps":null,"items":[{"id":"blog-article","layout":"ONE_COLUMN","bgColor":null,"showTitle":null,"showDescription":null,"textPosition":null,"textColor":null,"sectionEditLevel":"LOCKED","bgImage":null,"disableSpacing":null,"edgeToEdgeDisplay":null,"fullHeight":null,"showBorder":null,"__typename":"OneColumnQuiltSection","columnMap":{"main":[{"id":"blogs.widget.blogArticleWidget","className":"lia-blog-container","props":null,"__typename":"QuiltComponent"}],"__typename":"OneSectionColumns"}},{"id":"section-1729184836777","layout":"MAIN_
SIDE","bgColor":"transparent","showTitle":false,"showDescription":false,"textPosition":"CENTER","textColor":"var(--lia-bs-body-color)","sectionEditLevel":null,"bgImage":null,"disableSpacing":null,"edgeToEdgeDisplay":null,"fullHeight":null,"showBorder":null,"__typename":"MainSideQuiltSection","columnMap":{"main":[],"side":[{"id":"custom.widget.Social_Sharing","className":null,"props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"useBackground":true,"title":"Share","lazyLoad":false},"__typename":"QuiltComponent"}],"__typename":"MainSideSectionColumns"}}],"__typename":"QuiltContainer"},"__typename":"Quilt","localOverride":false},"localOverride":false},"CachedAsset:text:en_US-pages/blogs/BlogMessagePage-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-pages/blogs/BlogMessagePage-1737128950293","value":{"title":"{contextMessageSubject} | {communityTitle}","errorMissing":"This blog post cannot be found","name":"Blog Message Page","section.blog-article.title":"Blog Post","archivedMessageTitle":"This Content Has Been Archived","section.section-1729184836777.title":"","section.section-1729184836777.description":"","section.CncIde.title":"Blog 
Post","section.tifEmD.description":"","section.tifEmD.title":""},"localOverride":false},"CachedAsset:quiltWrapper:o365.prod:Common:1737128932676":{"__typename":"CachedAsset","id":"quiltWrapper:o365.prod:Common:1737128932676","value":{"id":"Common","header":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"community.widget.navbarWidget","props":{"showUserName":true,"showRegisterLink":true,"useIconLanguagePicker":true,"useLabelLanguagePicker":true,"className":"QuiltComponent_lia-component-edit-mode__0nCcm","links":{"sideLinks":[],"mainLinks":[{"children":[],"linkType":"INTERNAL","id":"gxcuf89792","params":{},"routeName":"CommunityPage"},{"children":[],"linkType":"EXTERNAL","id":"external-link","url":"/Directory","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft365","params":{"categoryId":"microsoft365"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-teams","params":{"categoryId":"MicrosoftTeams"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows","params":{"categoryId":"Windows"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-securityand-compliance","params":{"categoryId":"MicrosoftSecurityandCompliance"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"outlook","params":{"categoryId":"Outlook"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"planner","params":{"categoryId":"Planner"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows-server","params":{"categoryId":"Windows-Server"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"azure","params":{"categoryId":"Azure"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"exchange","params":{"categoryId":"Exchange"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-endpoint-manager","params":{"categor
yId":"microsoft-endpoint-manager"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"s-q-l-server","params":{"categoryId":"SQL-Server"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-2","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities","url":"/","target":"BLANK"},{"children":[{"linkType":"INTERNAL","id":"education-sector","params":{"categoryId":"EducationSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"a-i","params":{"categoryId":"AI"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"i-t-ops-talk","params":{"categoryId":"ITOpsTalk"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"partner-community","params":{"categoryId":"PartnerCommunity"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-mechanics","params":{"categoryId":"MicrosoftMechanics"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"healthcare-and-life-sciences","params":{"categoryId":"HealthcareAndLifeSciences"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"public-sector","params":{"categoryId":"PublicSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"io-t","params":{"categoryId":"IoT"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"driving-adoption","params":{"categoryId":"DrivingAdoption"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"s-m-b","params":{"categoryId":"SMB"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"startupsat-microsoft","params":{"categoryId":"StartupsatMicrosoft"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-1","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities-1","url":"/","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external","url":"/Blogs","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external-1","url":"/Events","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft-learn-1","params":{"categoryId":"MicrosoftLearn"},
"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-learn-blog","params":{"boardId":"MicrosoftLearnBlog","categoryId":"MicrosoftLearn"},"routeName":"BlogBoardPage"},{"linkType":"EXTERNAL","id":"external-10","url":"https://learningroomdirectory.microsoft.com/","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-3","url":"https://docs.microsoft.com/learn/dynamics365/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-4","url":"https://docs.microsoft.com/learn/m365/?wt.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-5","url":"https://docs.microsoft.com/learn/topics/sci/?wt.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-6","url":"https://docs.microsoft.com/learn/powerplatform/?wt.mc_id=techcom_header-webpage-powerplatform","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-7","url":"https://docs.microsoft.com/learn/github/?wt.mc_id=techcom_header-webpage-github","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-8","url":"https://docs.microsoft.com/learn/teams/?wt.mc_id=techcom_header-webpage-teams","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-9","url":"https://docs.microsoft.com/learn/dotnet/?wt.mc_id=techcom_header-webpage-dotnet","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-2","url":"https://docs.microsoft.com/learn/azure/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"}],"linkType":"INTERNAL","id":"microsoft-learn","params":{"categoryId":"MicrosoftLearn"},"routeName":"CategoryPage"},{"children":[],"linkType":"INTERNAL","id":"community-info-center","params":{"categoryId":"Community-Info-Center"},"routeName":"CategoryPage"}]},"style":{"boxShadow":"var(--lia-bs-box-shadow-sm)","controllerHighlightColor":"hsla(30, 100%, 
50%)","linkFontWeight":"400","dropdownDividerMarginBottom":"10px","hamburgerBorderHover":"none","linkBoxShadowHover":"none","linkFontSize":"14px","backgroundOpacity":0.8,"controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerBgColor":"transparent","hamburgerColor":"var(--lia-nav-controller-icon-color)","linkTextBorderBottom":"none","brandLogoHeight":"30px","linkBgHoverColor":"transparent","linkLetterSpacing":"normal","collapseMenuDividerOpacity":0.16,"dropdownPaddingBottom":"15px","paddingBottom":"15px","dropdownMenuOffset":"2px","hamburgerBgHoverColor":"transparent","borderBottom":"1px solid var(--lia-bs-border-color)","hamburgerBorder":"none","dropdownPaddingX":"10px","brandMarginRightSm":"10px","linkBoxShadow":"none","collapseMenuDividerBg":"var(--lia-nav-link-color)","linkColor":"var(--lia-bs-body-color)","linkJustifyContent":"flex-start","dropdownPaddingTop":"10px","controllerHighlightTextColor":"var(--lia-yiq-dark)","controllerTextColor":"var(--lia-nav-controller-icon-color)","background":{"imageAssetName":"","color":"var(--lia-bs-white)","size":"COVER","repeat":"NO_REPEAT","position":"CENTER_CENTER","imageLastModified":""},"linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkHoverColor":"var(--lia-bs-body-color)","position":"FIXED","linkBorder":"none","linkTextBorderBottomHover":"2px solid var(--lia-bs-body-color)","brandMarginRight":"30px","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","linkBorderHover":"none","collapseMenuMarginLeft":"20px","linkFontStyle":"NORMAL","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","linkPaddingX":"10px","linkPaddingY":"5px","paddingTop":"15px","linkTextTransform":"NONE","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 
0.1)","linkBgColor":"transparent","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkDropdownPaddingY":"9px","controllerIconColor":"var(--lia-bs-body-color)","dropdownDividerMarginTop":"10px","linkGap":"10px","controllerIconHoverColor":"var(--lia-bs-body-color)"},"showSearchIcon":false,"languagePickerStyle":"iconAndLabel"},"__typename":"QuiltComponent"},{"id":"community.widget.breadcrumbWidget","props":{"backgroundColor":"transparent","linkHighlightColor":"var(--lia-bs-primary)","visualEffects":{"showBottomBorder":true},"linkTextColor":"var(--lia-bs-gray-700)"},"__typename":"QuiltComponent"},{"id":"custom.widget.HeroBanner","props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"usePageWidth":false,"useBackground":false,"cMax_items":3,"title":"","lazyLoad":false,"widgetChooser":"custom.widget.HeroBanner"},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"footer":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"custom.widget.MicrosoftFooter","props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"useBackground":false,"title":"","lazyLoad":false},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"__typename":"QuiltWrapper","localOverride":false},"localOverride":false},"CachedAsset:text:en_US-components/common/ActionFeedback-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/common/ActionFeedback-1737128950293","value":{"joinedGroupHub.title":"Welcome","joinedGroupHub.message":"You are now a member of this group and are subscribed to updates.","groupHubInviteNotFound.title":"Invitation Not Found","groupHubInviteNotFound.message":"Sorry, we could not find your invitation to the group. 
The owner may have canceled the invite.","groupHubNotFound.title":"Group Not Found","groupHubNotFound.message":"The grouphub you tried to join does not exist. It may have been deleted.","existingGroupHubMember.title":"Already Joined","existingGroupHubMember.message":"You are already a member of this group.","accountLocked.title":"Account Locked","accountLocked.message":"Your account has been locked due to multiple failed attempts. Try again in {lockoutTime} minutes.","editedGroupHub.title":"Changes Saved","editedGroupHub.message":"Your group has been updated.","leftGroupHub.title":"Goodbye","leftGroupHub.message":"You are no longer a member of this group and will not receive future updates.","deletedGroupHub.title":"Deleted","deletedGroupHub.message":"The group has been deleted.","groupHubCreated.title":"Group Created","groupHubCreated.message":"{groupHubName} is ready to use","accountClosed.title":"Account Closed","accountClosed.message":"The account has been closed and you will now be redirected to the homepage","resetTokenExpired.title":"Reset Password Link has Expired","resetTokenExpired.message":"Try resetting your password again","invalidUrl.title":"Invalid URL","invalidUrl.message":"The URL you're using is not recognized. Verify your URL and try again.","accountClosedForUser.title":"Account Closed","accountClosedForUser.message":"{userName}'s account is closed","inviteTokenInvalid.title":"Invitation Invalid","inviteTokenInvalid.message":"Your invitation to the community has been canceled or expired.","inviteTokenError.title":"Invitation Verification Failed","inviteTokenError.message":"The url you are utilizing is not recognized. 
Verify your URL and try again","pageNotFound.title":"Access Denied","pageNotFound.message":"You do not have access to this area of the community or it doesn't exist","eventAttending.title":"Responded as Attending","eventAttending.message":"You'll be notified when there's new activity and reminded as the event approaches","eventInterested.title":"Responded as Interested","eventInterested.message":"You'll be notified when there's new activity and reminded as the event approaches","eventNotFound.title":"Event Not Found","eventNotFound.message":"The event you tried to respond to does not exist.","redirectToRelatedPage.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.message":"The content you are trying to access is archived","redirectToRelatedPage.message":"The content you are trying to access is archived","relatedUrl.archivalLink.flyoutMessage":"The content you are trying to access is archived View Archived Content"},"localOverride":false},"CachedAsset:component:custom.widget.HeroBanner-en-1737129007284":{"__typename":"CachedAsset","id":"component:custom.widget.HeroBanner-en-1737129007284","value":{"component":{"id":"custom.widget.HeroBanner","template":{"id":"HeroBanner","markupLanguage":"REACT","style":null,"texts":{"searchPlaceholderText":"Search this community","followActionText":"Follow","unfollowActionText":"Following","searchOnHoverText":"Please enter your search term(s) and then press return key to complete a search."},"defaults":{"config":{"applicablePages":[],"dynamicByCoreNode":null,"description":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.HeroBanner","form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"dynami
cByCoreNode":null,"description":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"__typename":"Component","localOverride":false},"globalCss":null,"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STR
ING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"}},"localOverride":false},"CachedAsset:component:custom.widget.Social_Sharing-en-1737129007284":{"__typename":"CachedAsset","id":"component:custom.widget.Social
_Sharing-en-1737129007284","value":{"component":{"id":"custom.widget.Social_Sharing","template":{"id":"Social_Sharing","markupLanguage":"HANDLEBARS","style":".social-share {\n .sharing-options {\n position: relative;\n margin: 0;\n padding: 0;\n line-height: 10px;\n display: flex;\n justify-content: left;\n gap: 5px;\n list-style-type: none;\n li {\n text-align: left;\n a {\n min-width: 30px;\n min-height: 30px;\n display: block;\n padding: 1px;\n .social-share-linkedin {\n img {\n background-color: rgb(0, 119, 181);\n }\n }\n .social-share-facebook {\n img {\n background-color: rgb(59, 89, 152);\n }\n }\n .social-share-x {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .social-share-rss {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .social-share-reddit {\n img {\n background-color: rgb(255, 69, 0);\n }\n }\n .social-share-email {\n img {\n background-color: rgb(132, 132, 132);\n }\n }\n }\n a {\n img {\n height: 2rem;\n }\n }\n }\n }\n}\n","texts":null,"defaults":{"config":{"applicablePages":[],"dynamicByCoreNode":false,"description":"Adds buttons to share to various social media websites","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.Social_Sharing","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"dynamicByCoreNode":false,"description":"Adds buttons to share to various social media websites","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_Social_Sharing_social-share_c7xxz_1 {\n .custom_widget_Social_Sharing_sharing-options_c7xxz_2 {\n position: relative;\n margin: 0;\n padding: 0;\n line-height: 0.625rem;\n display: flex;\n justify-content: left;\n gap: 0.3125rem;\n list-style-type: none;\n 
li {\n text-align: left;\n a {\n min-width: 1.875rem;\n min-height: 1.875rem;\n display: block;\n padding: 0.0625rem;\n .custom_widget_Social_Sharing_social-share-linkedin_c7xxz_18 {\n img {\n background-color: rgb(0, 119, 181);\n }\n }\n .custom_widget_Social_Sharing_social-share-facebook_c7xxz_23 {\n img {\n background-color: rgb(59, 89, 152);\n }\n }\n .custom_widget_Social_Sharing_social-share-x_c7xxz_28 {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .custom_widget_Social_Sharing_social-share-rss_c7xxz_33 {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .custom_widget_Social_Sharing_social-share-reddit_c7xxz_38 {\n img {\n background-color: rgb(255, 69, 0);\n }\n }\n .custom_widget_Social_Sharing_social-share-email_c7xxz_43 {\n img {\n background-color: rgb(132, 132, 132);\n }\n }\n }\n a {\n img {\n height: 2rem;\n }\n }\n }\n }\n}\n","tokens":{"social-share":"custom_widget_Social_Sharing_social-share_c7xxz_1","sharing-options":"custom_widget_Social_Sharing_sharing-options_c7xxz_2","social-share-linkedin":"custom_widget_Social_Sharing_social-share-linkedin_c7xxz_18","social-share-facebook":"custom_widget_Social_Sharing_social-share-facebook_c7xxz_23","social-share-x":"custom_widget_Social_Sharing_social-share-x_c7xxz_28","social-share-rss":"custom_widget_Social_Sharing_social-share-rss_c7xxz_33","social-share-reddit":"custom_widget_Social_Sharing_social-share-reddit_c7xxz_38","social-share-email":"custom_widget_Social_Sharing_social-share-email_c7xxz_43"}},"form":null},"localOverride":false},"CachedAsset:component:custom.widget.MicrosoftFooter-en-1737129007284":{"__typename":"CachedAsset","id":"component:custom.widget.MicrosoftFooter-en-1737129007284","value":{"component":{"id":"custom.widget.MicrosoftFooter","template":{"id":"MicrosoftFooter","markupLanguage":"HANDLEBARS","style":".context-uhf {\n min-width: 280px;\n font-size: 15px;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & 
*:after {\n box-sizing: inherit;\n }\n a.c-uhff-link {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n &a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.c-uhff {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.c-uhff-nav {\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n .c-heading-4 {\n color: #616161;\n word-break: break-word;\n font-size: 15px;\n line-height: 20px;\n padding: 36px 0 4px;\n font-weight: 600;\n }\n .c-uhff-nav-row {\n .c-uhff-nav-group {\n display: block;\n float: left;\n min-height: 1px;\n vertical-align: text-top;\n padding: 0 12px;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.c-list.f-bare {\n font-size: 11px;\n line-height: 16px;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 8px 0;\n margin: 0;\n }\n }\n }\n }\n}\n.c-uhff-base {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 30px 5% 16px;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.c-uhff-ccpa {\n font-size: 11px;\n line-height: 16px;\n float: left;\n margin: 3px 0;\n }\n a.c-uhff-ccpa:hover {\n text-decoration: underline;\n }\n ul.c-list {\n font-size: 11px;\n line-height: 16px;\n float: right;\n margin: 3px 0;\n color: #616161;\n li {\n padding: 0 24px 4px 
0;\n display: inline-block;\n }\n }\n .c-list.f-bare {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 30px 24px 16px;\n }\n}\n","texts":{"New tab":"What's New","New 1":"Surface Laptop Studio 2","New 2":"Surface Laptop Go 3","New 3":"Surface Pro 9","New 4":"Surface Laptop 5","New 5":"Surface Studio 2+","New 6":"Copilot in Windows","New 7":"Microsoft 365","New 8":"Windows 11 apps","Store tab":"Microsoft Store","Store 1":"Account Profile","Store 2":"Download Center","Store 3":"Microsoft Store Support","Store 4":"Returns","Store 5":"Order tracking","Store 6":"Certified Refurbished","Store 7":"Microsoft Store Promise","Store 8":"Flexible Payments","Education tab":"Education","Edu 1":"Microsoft in education","Edu 2":"Devices for education","Edu 3":"Microsoft Teams for Education","Edu 4":"Microsoft 365 Education","Edu 5":"How to buy for your school","Edu 6":"Educator Training and development","Edu 7":"Deals for students and parents","Edu 8":"Azure for students","Business tab":"Business","Bus 1":"Microsoft Cloud","Bus 2":"Microsoft Security","Bus 3":"Dynamics 365","Bus 4":"Microsoft 365","Bus 5":"Microsoft Power Platform","Bus 6":"Microsoft Teams","Bus 7":"Microsoft Industry","Bus 8":"Small Business","Developer tab":"Developer & IT","Dev 1":"Azure","Dev 2":"Developer Center","Dev 3":"Documentation","Dev 4":"Microsoft Learn","Dev 5":"Microsoft Tech Community","Dev 6":"Azure Marketplace","Dev 7":"AppSource","Dev 8":"Visual Studio","Company tab":"Company","Com 1":"Careers","Com 2":"About Microsoft","Com 3":"Company News","Com 4":"Privacy at Microsoft","Com 5":"Investors","Com 6":"Diversity and inclusion","Com 7":"Accessibility","Com 8":"Sustainability"},"defaults":{"config":{"applicablePages":[],"dynamicByCoreNode":false,"description":"The Microsoft 
Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.MicrosoftFooter","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"dynamicByCoreNode":false,"description":"The Microsoft Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_MicrosoftFooter_context-uhf_f95yq_1 {\n min-width: 17.5rem;\n font-size: 0.9375rem;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & *:after {\n box-sizing: inherit;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-link_f95yq_12 {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n &a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff_f95yq_12 {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.custom_widget_MicrosoftFooter_c-uhff-nav_f95yq_35 {\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n .custom_widget_MicrosoftFooter_c-heading-4_f95yq_49 {\n color: #616161;\n word-break: break-word;\n font-size: 0.9375rem;\n line-height: 1.25rem;\n padding: 2.25rem 0 0.25rem;\n font-weight: 600;\n }\n .custom_widget_MicrosoftFooter_c-uhff-nav-row_f95yq_57 {\n .custom_widget_MicrosoftFooter_c-uhff-nav-group_f95yq_58 {\n display: block;\n float: left;\n min-height: 0.0625rem;\n vertical-align: 
text-top;\n padding: 0 0.75rem;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.custom_widget_MicrosoftFooter_c-list_f95yq_78.custom_widget_MicrosoftFooter_f-bare_f95yq_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 0.5rem 0;\n margin: 0;\n }\n }\n }\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff-base_f95yq_94 {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 1.875rem 5% 1rem;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_f95yq_107 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: left;\n margin: 0.1875rem 0;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_f95yq_107:hover {\n text-decoration: underline;\n }\n ul.custom_widget_MicrosoftFooter_c-list_f95yq_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: right;\n margin: 0.1875rem 0;\n color: #616161;\n li {\n padding: 0 1.5rem 0.25rem 0;\n display: inline-block;\n }\n }\n .custom_widget_MicrosoftFooter_c-list_f95yq_78.custom_widget_MicrosoftFooter_f-bare_f95yq_78 {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 1.875rem 1.5rem 1rem;\n 
}\n}\n","tokens":{"context-uhf":"custom_widget_MicrosoftFooter_context-uhf_f95yq_1","c-uhff-link":"custom_widget_MicrosoftFooter_c-uhff-link_f95yq_12","c-uhff":"custom_widget_MicrosoftFooter_c-uhff_f95yq_12","c-uhff-nav":"custom_widget_MicrosoftFooter_c-uhff-nav_f95yq_35","c-heading-4":"custom_widget_MicrosoftFooter_c-heading-4_f95yq_49","c-uhff-nav-row":"custom_widget_MicrosoftFooter_c-uhff-nav-row_f95yq_57","c-uhff-nav-group":"custom_widget_MicrosoftFooter_c-uhff-nav-group_f95yq_58","c-list":"custom_widget_MicrosoftFooter_c-list_f95yq_78","f-bare":"custom_widget_MicrosoftFooter_f-bare_f95yq_78","c-uhff-base":"custom_widget_MicrosoftFooter_c-uhff-base_f95yq_94","c-uhff-ccpa":"custom_widget_MicrosoftFooter_c-uhff-ccpa_f95yq_107"}},"form":null},"localOverride":false},"CachedAsset:text:en_US-components/community/Breadcrumb-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/community/Breadcrumb-1737128950293","value":{"navLabel":"Breadcrumbs","dropdown":"Additional parent page navigation"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageBanner-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageBanner-1737128950293","value":{"messageMarkedAsSpam":"This post has been marked as spam","messageMarkedAsSpam@board:TKB":"This article has been marked as spam","messageMarkedAsSpam@board:BLOG":"This post has been marked as spam","messageMarkedAsSpam@board:FORUM":"This discussion has been marked as spam","messageMarkedAsSpam@board:OCCASION":"This event has been marked as spam","messageMarkedAsSpam@board:IDEA":"This idea has been marked as spam","manageSpam":"Manage Spam","messageMarkedAsAbuse":"This post has been marked as abuse","messageMarkedAsAbuse@board:TKB":"This article has been marked as abuse","messageMarkedAsAbuse@board:BLOG":"This post has been marked as abuse","messageMarkedAsAbuse@board:FORUM":"This discussion has been marked as abuse","messageMarkedAsAbuse@board:OCCASION":"This event 
has been marked as abuse","messageMarkedAsAbuse@board:IDEA":"This idea has been marked as abuse","preModCommentAuthorText":"This comment will be published as soon as it is approved","preModCommentModeratorText":"This comment is awaiting moderation","messageMarkedAsOther":"This post has been rejected due to other reasons","messageMarkedAsOther@board:TKB":"This article has been rejected due to other reasons","messageMarkedAsOther@board:BLOG":"This post has been rejected due to other reasons","messageMarkedAsOther@board:FORUM":"This discussion has been rejected due to other reasons","messageMarkedAsOther@board:OCCASION":"This event has been rejected due to other reasons","messageMarkedAsOther@board:IDEA":"This idea has been rejected due to other reasons","messageArchived":"This post was archived on {date}","relatedUrl":"View Related Content","relatedContentText":"Showing related content","archivedContentLink":"View Archived Content"},"localOverride":false},"Category:category:Exchange":{"__typename":"Category","id":"category:Exchange","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Planner":{"__typename":"Category","id":"category:Planner","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Outlook":{"__typename":"Category","id":"category:Outlook","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Community-Info-Center":{"__typename":"Category","id":"category:Community-Info-Center","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:EducationSector":{"__typename":"Category","id":"category:EducationSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Categ
ory:category:DrivingAdoption":{"__typename":"Category","id":"category:DrivingAdoption","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Azure":{"__typename":"Category","id":"category:Azure","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows-Server":{"__typename":"Category","id":"category:Windows-Server","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:SQL-Server":{"__typename":"Category","id":"category:SQL-Server","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftTeams":{"__typename":"Category","id":"category:MicrosoftTeams","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:PublicSector":{"__typename":"Category","id":"category:PublicSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoft365":{"__typename":"Category","id":"category:microsoft365","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:IoT":{"__typename":"Category","id":"category:IoT","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:HealthcareAndLifeSciences":{"__typename":"Category","id":"category:HealthcareAndLifeSciences","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:SMB":{"__typename":"Category","id":"category:SMB","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename"
:"PolicyResult","failureReason":null}}},"Category:category:ITOpsTalk":{"__typename":"Category","id":"category:ITOpsTalk","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoft-endpoint-manager":{"__typename":"Category","id":"category:microsoft-endpoint-manager","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftSecurityandCompliance":{"__typename":"Category","id":"category:MicrosoftSecurityandCompliance","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftLearn":{"__typename":"Category","id":"category:MicrosoftLearn","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Blog:board:MicrosoftLearnBlog":{"__typename":"Blog","id":"board:MicrosoftLearnBlog","blogPolicies":{"__typename":"BlogPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}},"boardPolicies":{"__typename":"BoardPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftMechanics":{"__typename":"Category","id":"category:MicrosoftMechanics","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:StartupsatMicrosoft":{"__typename":"Category","id":"category:StartupsatMicrosoft","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:PartnerCommunity":{"__typename":"Category","id":"category:PartnerCommunity","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows":{"__typename":"Category","id":"category:Windows","categoryPolicies":{"__typename":"Cat
egoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"QueryVariables:TopicReplyList:message:4362539:4":{"__typename":"QueryVariables","id":"TopicReplyList:message:4362539:4","value":{"id":"message:4362539","first":10,"sorts":{"postTime":{"direction":"DESC"}},"repliesFirst":3,"repliesFirstDepthThree":1,"repliesSorts":{"postTime":{"direction":"DESC"}},"useAvatar":true,"useAuthorLogin":true,"useAuthorRank":true,"useBody":true,"useKudosCount":true,"useTimeToRead":false,"useMedia":false,"useReadOnlyIcon":false,"useRepliesCount":true,"useSearchSnippet":false,"useAcceptedSolutionButton":false,"useSolvedBadge":false,"useAttachments":false,"attachmentsFirst":5,"useTags":true,"useNodeAncestors":false,"useUserHoverCard":true,"useNodeHoverCard":false,"useModerationStatus":true,"usePreviewSubjectModal":false,"useMessageStatus":true}},"ROOT_MUTATION":{"__typename":"Mutation"},"CachedAsset:text:en_US-components/community/Navbar-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/community/Navbar-1737128950293","value":{"community":"Community Home","inbox":"Inbox","manageContent":"Manage Content","tos":"Terms of Service","forgotPassword":"Forgot Password","themeEditor":"Theme Editor","edit":"Edit Navigation Bar","skipContent":"Skip to content","gxcuf89792":"Tech Community","external-1":"Events","s-m-b":"Small and Medium Businesses","windows-server":"Windows Server","education-sector":"Education Sector","driving-adoption":"Driving Adoption","microsoft-learn":"Microsoft Learn","s-q-l-server":"SQL Server","partner-community":"Microsoft Partner Community","microsoft365":"Microsoft 365","external-9":".NET","external-8":"Teams","external-7":"Github","products-services":"Products","external-6":"Power Platform","communities-1":"Topics","external-5":"Microsoft Security","planner":"Planner","external-4":"Microsoft 365","external-3":"Dynamics 365","azure":"Azure","healthcare-and-life-sciences":"Healthcare and Life 
Sciences","external-2":"Azure","microsoft-mechanics":"Microsoft Mechanics","microsoft-learn-1":"Community","external-10":"Learning Room Directory","microsoft-learn-blog":"Blog","windows":"Windows","i-t-ops-talk":"ITOps Talk","external-link-1":"View All","microsoft-securityand-compliance":"Microsoft Security","public-sector":"Public Sector","community-info-center":"Lounge","external-link-2":"View All","microsoft-teams":"Microsoft Teams","external":"Blogs","microsoft-endpoint-manager":"Microsoft Intune and Configuration Manager","startupsat-microsoft":"Startups at Microsoft","exchange":"Exchange","a-i":"AI and Machine Learning","io-t":"Internet of Things (IoT)","outlook":"Outlook","external-link":"Community Hubs","communities":"Products"},"localOverride":false},"CachedAsset:text:en_US-components/community/NavbarHamburgerDropdown-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/community/NavbarHamburgerDropdown-1737128950293","value":{"hamburgerLabel":"Side Menu"},"localOverride":false},"CachedAsset:text:en_US-components/community/BrandLogo-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/community/BrandLogo-1737128950293","value":{"logoAlt":"Khoros","themeLogoAlt":"Brand Logo"},"localOverride":false},"CachedAsset:text:en_US-components/community/NavbarTextLinks-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/community/NavbarTextLinks-1737128950293","value":{"more":"More"},"localOverride":false},"CachedAsset:text:en_US-components/authentication/AuthenticationLink-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/authentication/AuthenticationLink-1737128950293","value":{"title.login":"Sign In","title.registration":"Register","title.forgotPassword":"Forgot Password","title.multiAuthLogin":"Sign 
In"},"localOverride":false},"CachedAsset:text:en_US-components/nodes/NodeLink-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/nodes/NodeLink-1737128950293","value":{"place":"Place {name}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageView/MessageViewStandard-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageView/MessageViewStandard-1737128950293","value":{"anonymous":"Anonymous","author":"{messageAuthorLogin}","authorBy":"{messageAuthorLogin}","board":"{messageBoardTitle}","replyToUser":" to {parentAuthor}","showMoreReplies":"Show More","replyText":"Reply","repliesText":"Replies","markedAsSolved":"Marked as Solved","movedMessagePlaceholder.BLOG":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholder.TKB":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholder.FORUM":"{count, plural, =0 {This reply has been} other {These replies have been} }","movedMessagePlaceholder.IDEA":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholder.OCCASION":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholderUrlText":"moved.","messageStatus":"Status: ","statusChanged":"Status changed: {previousStatus} to {currentStatus}","statusAdded":"Status added: {status}","statusRemoved":"Status removed: {status}","labelExpand":"expand replies","labelCollapse":"collapse replies","unhelpfulReason.reason1":"Content is outdated","unhelpfulReason.reason2":"Article is missing information","unhelpfulReason.reason3":"Content is for a different Product","unhelpfulReason.reason4":"Doesn't match what I was searching 
for"},"localOverride":false},"CachedAsset:text:en_US-components/messages/ThreadedReplyList-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/ThreadedReplyList-1737128950293","value":{"title":"{count, plural, one{# Reply} other{# Replies}}","title@board:BLOG":"{count, plural, one{# Comment} other{# Comments}}","title@board:TKB":"{count, plural, one{# Comment} other{# Comments}}","title@board:IDEA":"{count, plural, one{# Comment} other{# Comments}}","title@board:OCCASION":"{count, plural, one{# Comment} other{# Comments}}","noRepliesTitle":"No Replies","noRepliesTitle@board:BLOG":"No Comments","noRepliesTitle@board:TKB":"No Comments","noRepliesTitle@board:IDEA":"No Comments","noRepliesTitle@board:OCCASION":"No Comments","noRepliesDescription":"Be the first to reply","noRepliesDescription@board:BLOG":"Be the first to comment","noRepliesDescription@board:TKB":"Be the first to comment","noRepliesDescription@board:IDEA":"Be the first to comment","noRepliesDescription@board:OCCASION":"Be the first to comment","messageReadOnlyAlert:BLOG":"Comments have been turned off for this post","messageReadOnlyAlert:TKB":"Comments have been turned off for this article","messageReadOnlyAlert:IDEA":"Comments have been turned off for this idea","messageReadOnlyAlert:FORUM":"Replies have been turned off for this discussion","messageReadOnlyAlert:OCCASION":"Comments have been turned off for this event"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageReplyCallToAction-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageReplyCallToAction-1737128950293","value":{"leaveReply":"Leave a reply...","leaveReply@board:BLOG@message:root":"Leave a comment...","leaveReply@board:TKB@message:root":"Leave a comment...","leaveReply@board:IDEA@message:root":"Leave a comment...","leaveReply@board:OCCASION@message:root":"Leave a comment...","repliesTurnedOff.FORUM":"Replies are turned off for this 
topic","repliesTurnedOff.BLOG":"Comments are turned off for this topic","repliesTurnedOff.TKB":"Comments are turned off for this topic","repliesTurnedOff.IDEA":"Comments are turned off for this topic","repliesTurnedOff.OCCASION":"Comments are turned off for this topic","infoText":"Stop poking me!"},"localOverride":false},"CachedAsset:text:en_US-components/customComponent/CustomComponent-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/customComponent/CustomComponent-1737128950293","value":{"errorMessage":"Error rendering component id: {customComponentId}","bannerTitle":"Video provider requires cookies to play the video. Accept to continue or {url} it directly on the provider's site.","buttonTitle":"Accept","urlText":"watch"},"localOverride":false},"CachedAsset:text:en_US-components/community/NavbarDropdownToggle-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/community/NavbarDropdownToggle-1737128950293","value":{"ariaLabelClosed":"Press the down arrow to open the menu"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageCoverImage-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageCoverImage-1737128950293","value":{"coverImageTitle":"Cover Image"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeTitle-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeTitle-1737128950293","value":{"nodeTitle":"{nodeTitle, select, community {Community} other {{nodeTitle}}} "},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageTimeToRead-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageTimeToRead-1737128950293","value":{"minReadText":"{min} MIN 
READ"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageSubject-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageSubject-1737128950293","value":{"noSubject":"(no subject)"},"localOverride":false},"CachedAsset:text:en_US-components/users/UserLink-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/users/UserLink-1737128950293","value":{"authorName":"View Profile: {author}","anonymous":"Anonymous"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/users/UserRank-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/users/UserRank-1737128950293","value":{"rankName":"{rankName}","userRank":"Author rank {rankName}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageTime-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageTime-1737128950293","value":{"postTime":"Published: {time}","lastPublishTime":"Last Update: {time}","conversation.lastPostingActivityTime":"Last posting activity time: {time}","conversation.lastPostTime":"Last post time: {time}","moderationData.rejectTime":"Rejected time: {time}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageBody-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageBody-1737128950293","value":{"showMessageBody":"Show More","mentionsErrorTitle":"{mentionsType, select, board {Board} user {User} message {Message} other {}} No Longer Available","mentionsErrorMessage":"The {mentionsType} you are trying to view has been removed from the community.","videoProcessing":"Video is being processed. Please try again in a few minutes.","bannerTitle":"Video provider requires cookies to play the video. 
Accept to continue or {url} it directly on the provider's site.","buttonTitle":"Accept","urlText":"watch"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageCustomFields-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageCustomFields-1737128950293","value":{"CustomField.default.label":"Value of {name}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageRevision-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageRevision-1737128950293","value":{"lastUpdatedDatePublished":"{publishCount, plural, one{Published} other{Updated}} {date}","lastUpdatedDateDraft":"Created {date}","version":"Version {major}.{minor}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/QueryHandler-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/QueryHandler-1737128950293","value":{"title":"Query Handler"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageReplyButton-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageReplyButton-1737128950293","value":{"repliesCount":"{count}","title":"Reply","title@board:BLOG@message:root":"Comment","title@board:TKB@message:root":"Comment","title@board:IDEA@message:root":"Comment","title@board:OCCASION@message:root":"Comment"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageAuthorBio-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageAuthorBio-1737128950293","value":{"sendMessage":"Send Message","actionMessage":"Follow this blog board to get notified when there's new activity","coAuthor":"CO-PUBLISHER","contributor":"CONTRIBUTOR","userProfile":"View Profile","iconlink":"Go to {name} 
{type}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/users/UserAvatar-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/users/UserAvatar-1737128950293","value":{"altText":"{login}'s avatar","altTextGeneric":"User's avatar"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/ranks/UserRankLabel-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/ranks/UserRankLabel-1737128950293","value":{"altTitle":"Icon for {rankName} rank"},"localOverride":false},"CachedAsset:text:en_US-components/users/UserRegistrationDate-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/users/UserRegistrationDate-1737128950293","value":{"noPrefix":"{date}","withPrefix":"Joined {date}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeAvatar-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeAvatar-1737128950293","value":{"altTitle":"Node avatar for {nodeTitle}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeDescription-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeDescription-1737128950293","value":{"description":"{description}"},"localOverride":false},"CachedAsset:text:en_US-components/tags/TagView/TagViewChip-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-components/tags/TagView/TagViewChip-1737128950293","value":{"tagLabelName":"Tag name {tagName}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeIcon-1737128950293":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeIcon-1737128950293","value":{"contentType":"Content Type {style, select, FORUM {Forum} BLOG {Blog} TKB {Knowledge Base} IDEA {Ideas} OCCASION {Events} other {}} 
icon"},"localOverride":false}}}},"page":"/blogs/BlogMessagePage/BlogMessagePage","query":{"boardId":"machinelearningblog","messageSubject":"fine-tuning-small-language-models-for-function-calling-a-comprehensive-guide","messageId":"4362539"},"buildId":"_kRgk9XS0CJUuQkUSQ9os","runtimeConfig":{"buildInformationVisible":false,"logLevelApp":"info","logLevelMetrics":"info","openTelemetryClientEnabled":false,"openTelemetryConfigName":"o365","openTelemetryServiceVersion":"24.11.0","openTelemetryUniverse":"prod","openTelemetryCollector":"http://localhost:4318","openTelemetryRouteChangeAllowedTime":"5000","apolloDevToolsEnabled":false},"isFallback":false,"isExperimentalCompile":false,"dynamicIds":["./components/community/Navbar/NavbarWidget.tsx","./components/community/Breadcrumb/BreadcrumbWidget.tsx","./components/customComponent/CustomComponent/CustomComponent.tsx","./components/blogs/BlogArticleWidget/BlogArticleWidget.tsx","./components/external/components/ExternalComponent.tsx","./components/messages/MessageView/MessageViewStandard/MessageViewStandard.tsx","./components/messages/ThreadedReplyList/ThreadedReplyList.tsx","../shared/client/components/common/List/UnwrappedList/UnwrappedList.tsx","./components/tags/TagView/TagView.tsx","./components/tags/TagView/TagViewChip/TagViewChip.tsx"],"appGip":true,"scriptLoader":[{"id":"analytics","src":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/pagescripts/1729284608000/analytics.js?page.id=BlogMessagePage&entity.id=board%3Amachinelearningblog&entity.id=message%3A4362539","strategy":"afterInteractive"}]}