Agentic Ed in Action

AI agents enhance education systems to drive measurable outcomes

Pharmaceutical Giant

Accelerated Development

Curriculum development took months and relied on large vendor budgets and bespoke teams of FTEs and contractors.

We applied Agentic Ed thinking to transform their approach. Using AI-enhanced workflows configured to work with existing tools and standards, we cut course production time by 75% and vendor costs by 80%.

National Financial Firm

Service Quality by Design

Poor training for frontline representatives resulted in dismal CSAT scores and spikes in consumer complaints.

We applied learning science to redesign learning around scenario-based training with individualized simulations using AI-generated voices. Daily micro-learning addressed the evolving business needs. First call resolution improved 25% while CSAT scores increased 35%.

SaaS Analytics Leader

Global Customer Education

Rapid growth outpaced customer education and enablement, causing churn and missed opportunities.

We built an online academy offering evergreen, self-paced courses with hands-on, on-demand labs to learners worldwide. We designed a certification program that verified skills and expertise using project-based assessments and Agentic AI. After three months, new customer retention improved by 30% and revenue related to trained customers increased 20%.

Image & Text

JSON

{ "version": "1.0", "assessmentId": "ced-branching-agentic-ai-v1", "title": "Customer Education Diagnostic: Flywheel + Agentic AI", "ui": { "noBackButton": true, "showQuestionCounter": true, "continueLabel": "Continue", "progressMode": "dynamic_step", "notes": "Branching makes total questions variable; UI should show Step X, not X of N." }, "cta": { "label": "Schedule a conversation", "url": "https://YOUR-CALENDAR-LINK" }, "dimensions": [ { "id": "outcome_governance", "label": "Outcome Governance", "order": 1 }, { "id": "flywheel_signals", "label": "Flywheel Signals", "order": 2 }, { "id": "ai_workflow", "label": "AI-in-the-Workflow", "order": 3 }, { "id": "knowledge_architecture", "label": "Knowledge Architecture", "order": 4 }, { "id": "gtm_orchestration", "label": "Orchestration Across GTM", "order": 5 }, { "id": "personalization", "label": "Personalization & Moment-of-Need", "order": 6 }, { "id": "operating_system", "label": "Operating System & Standards", "order": 7 } ], "pages": [ { "type": "intro", "id": "intro", "title": "Customer Education Diagnostic", "subtitle": "Measure your flywheel signals, operating system, and AI readiness.", "body": [ "This diagnostic focuses on practices, not platitudes.", "You’ll receive a PDF report with your system shape, benchmark comparison, and a 90-day focus recommendation." 
], "consent": { "label": "Email me my report (PDF).", "required": true }, "fields": [ { "id": "role", "label": "Role", "required": true, "options": ["Customer Education / Training","Customer Success","Support","Product","Marketing / Growth","RevOps / Enablement","Executive Leadership","Other"] }, { "id": "industry", "label": "Industry", "required": true, "options": ["B2B SaaS","Enterprise Software","FinTech","Healthcare","Manufacturing","Professional Services","Other"] }, { "id": "company_size", "label": "Company size", "required": true, "options": ["1–49","50–99","100–199","200–499","500–999","1,000–2,499","2,500–4,999","5,000–9,999","10,000–24,999","25,000+"] } ], "primaryButtonLabel": "Start", "defaultNextPageId": "n1" }, { "type": "narrative", "id": "n1", "title": "How to answer", "body": ["Choose the option that best reflects how you operate today.", "No back button. Answers are saved as you go."], "defaultNextPageId": "q01_leadership_view" }, /* ---------- CORE SPINE (Questions 1–18) ---------- */ { "type": "question", "id": "q01_leadership_view", "dimensionId": "outcome_governance", "prompt": "Which statement best describes how leadership views customer education?", "options": [ { "id": "a", "label": "Largely tactical or reactive", "score": 1 }, { "id": "b", "label": "Nice to have", "score": 2 }, { "id": "c", "label": "Important but secondary support function", "score": 3 }, { "id": "d", "label": "Strategic growth lever", "score": 4 } ], "defaultNextPageId": "q02_exec_sponsor" }, { "type": "question", "id": "q02_exec_sponsor", "dimensionId": "operating_system", "prompt": "Does your customer education program have an executive sponsor?", "options": [ { "id": "a", "label": "No", "score": 1 }, { "id": "b", "label": "Yes, but informally", "score": 2 }, { "id": "c", "label": "Yes, directly accountable (shared)", "score": 3 }, { "id": "d", "label": "Yes, directly accountable (clear owner + cadence)", "score": 4 } ], "defaultNextPageId": "q03_funding_model" }, { 
"type": "question", "id": "q03_funding_model", "dimensionId": "operating_system", "prompt": "How is customer education funded?", "options": [ { "id": "a", "label": "No formal budget", "score": 1 }, { "id": "b", "label": "Project-based / ad hoc", "score": 2 }, { "id": "c", "label": "Shared budget with another function", "score": 3 }, { "id": "d", "label": "Dedicated budget with planned investment", "score": 4 } ], "defaultNextPageId": "q04_team_size" }, { "type": "question", "id": "q04_team_size", "dimensionId": "operating_system", "prompt": "How many people work on your customer education team?", "options": [ { "id": "a", "label": "Just me", "score": 1 }, { "id": "b", "label": "2 people", "score": 2 }, { "id": "c", "label": "3–5 people", "score": 3 }, { "id": "d", "label": "6+ people", "score": 4 } ], "defaultNextPageId": "q05_strategy_owner" }, { "type": "question", "id": "q05_strategy_owner", "dimensionId": "operating_system", "prompt": "Which group owns customer education strategy?", "options": [ { "id": "a", "label": "No clear owner (ad hoc)", "score": 1 }, { "id": "b", "label": "Owned by one group, loosely coordinated", "score": 2 }, { "id": "c", "label": "Shared ownership across multiple groups", "score": 3 }, { "id": "d", "label": "Clear owner with cross-functional governance", "score": 4 } ], "defaultNextPageId": "q06_lifecycle_focus" }, { "type": "question", "id": "q06_lifecycle_focus", "dimensionId": "gtm_orchestration", "prompt": "At what point in the customer lifecycle does education play the largest role?", "options": [ { "id": "a", "label": "Pre-sale", "score": 1 }, { "id": "b", "label": "Onboarding / activation", "score": 2 }, { "id": "c", "label": "Ongoing usage optimization", "score": 3 }, { "id": "d", "label": "Expansion / renewal", "score": 4 } ], "defaultNextPageId": "q07_value_priority" }, { "type": "question", "id": "q07_value_priority", "dimensionId": "outcome_governance", "prompt": "When you prioritize work, what wins most often?", 
"options": [ { "id": "a", "label": "Asset output (courses shipped, videos published)", "score": 1 }, { "id": "b", "label": "Stakeholder urgency and requests", "score": 2 }, { "id": "c", "label": "Engagement (completions, attendance)", "score": 3 }, { "id": "d", "label": "Outcome hypotheses tied to business impact", "score": 4 } ], "defaultNextPageId": "q08_design_approach" }, { "type": "question", "id": "q08_design_approach", "dimensionId": "knowledge_architecture", "prompt": "Which statement best describes your learning design approach?", "options": [ { "id": "a", "label": "Primarily information delivery", "score": 1 }, { "id": "b", "label": "Primarily guided skill building (teach → practice → apply)", "score": 2 }, { "id": "c", "label": "Primarily performance support (moment of need)", "score": 3 }, { "id": "d", "label": "Blended approach with intentional choice by use case", "score": 4 } ], "defaultNextPageId": "q09_content_refresh" }, { "type": "question", "id": "q09_content_refresh", "dimensionId": "knowledge_architecture", "prompt": "How often is customer education content refreshed or updated?", "options": [ { "id": "a", "label": "Content is often outdated", "score": 1 }, { "id": "b", "label": "Some content is updated periodically", "score": 2 }, { "id": "c", "label": "Updates follow a cycle or release checklist", "score": 3 }, { "id": "d", "label": "Continuous refresh based on change signals and governance", "score": 4 } ], "defaultNextPageId": "q10_systems_in_experience" }, { "type": "question", "id": "q10_systems_in_experience", "dimensionId": "operating_system", "prompt": "Which statement best describes your learning systems ecosystem?", "options": [ { "id": "a", "label": "Primarily a single system (LMS/LXP) with limited connections", "score": 1 }, { "id": "b", "label": "Multiple systems exist but are loosely coordinated", "score": 2 }, { "id": "c", "label": "Systems are connected for reporting in some areas", "score": 3 }, { "id": "d", "label": "Systems 
are integrated and used to drive actions", "score": 4 } ], "defaultNextPageId": "q11_integration_depth" }, { "type": "question", "id": "q11_integration_depth", "dimensionId": "operating_system", "prompt": "How connected is learning data to CRM / CS platforms / support / product usage data?", "options": [ { "id": "a", "label": "Mostly disconnected", "score": 1 }, { "id": "b", "label": "Manual exports or ad hoc connections", "score": 2 }, { "id": "c", "label": "Partial integration for dashboards", "score": 3 }, { "id": "d", "label": "Integrated and used for targeting + measurement", "score": 4 } ], "defaultNextPageId": "q12_signal_definition" }, { "type": "question", "id": "q12_signal_definition", "dimensionId": "flywheel_signals", "prompt": "How does your organization know an account is adoption-ready (or at risk)?", "options": [ { "id": "a", "label": "Anecdotes and individual judgment", "score": 1 }, { "id": "b", "label": "Course activity proxies (completions/attendance)", "score": 2 }, { "id": "c", "label": "A mix of learning + product/support signals (informal)", "score": 3 }, { "id": "d", "label": "Defined signals that trigger actions across teams", "score": 4 } ], "defaultNextPageId": "q13_engagement_trigger" }, { "type": "question", "id": "q13_engagement_trigger", "dimensionId": "flywheel_signals", "prompt": "What primarily drives when customers are engaged around learning?", "options": [ { "id": "a", "label": "Ad hoc decisions", "score": 1 }, { "id": "b", "label": "Scheduled touchpoints (onboarding/QBR)", "score": 2 }, { "id": "c", "label": "Lifecycle stage + some signals", "score": 3 }, { "id": "d", "label": "Signals trigger defined interventions", "score": 4 } ], "defaultNextPageId": "q14_proactivity" }, { "type": "question", "id": "q14_proactivity", "dimensionId": "flywheel_signals", "prompt": "How proactive is your organization in engaging customers around learning?", "options": [ { "id": "a", "label": "Primarily reactive and customer-initiated", "score": 
1 }, { "id": "b", "label": "Occasionally proactive but inconsistent", "score": 2 }, { "id": "c", "label": "Proactively driven by defined processes or signals", "score": 3 }, { "id": "d", "label": "Highly proactive and integrated into lifecycle plays", "score": 4 } ], "defaultNextPageId": "q15_crossfunctional_engagement" }, { "type": "question", "id": "q15_crossfunctional_engagement", "dimensionId": "gtm_orchestration", "prompt": "Which statement best describes how customer-facing teams engage customers around learning?", "options": [ { "id": "a", "label": "Education is rarely part of CS/support workflows", "score": 1 }, { "id": "b", "label": "Education appears in onboarding, but not beyond", "score": 2 }, { "id": "c", "label": "Education is included in QBRs / success plans inconsistently", "score": 3 }, { "id": "d", "label": "Education is embedded in CS/support plays with triggers", "score": 4 } ], "defaultNextPageId": "q16_measurement_usage" }, { "type": "question", "id": "q16_measurement_usage", "dimensionId": "outcome_governance", "prompt": "How is engagement and performance data used most often?", "options": [ { "id": "a", "label": "Reported and rarely acted on", "score": 1 }, { "id": "b", "label": "Used to improve content", "score": 2 }, { "id": "c", "label": "Used to demonstrate value to leadership", "score": 3 }, { "id": "d", "label": "Used to trigger interventions and measure impact", "score": 4 } ], "defaultNextPageId": "q17_roi_confidence" }, { "type": "question", "id": "q17_roi_confidence", "dimensionId": "outcome_governance", "prompt": "How confident are you in demonstrating customer education ROI?", "options": [ { "id": "a", "label": "Not confident", "score": 1 }, { "id": "b", "label": "Somewhat confident", "score": 2 }, { "id": "c", "label": "Confident for select outcomes", "score": 3 }, { "id": "d", "label": "Very confident with repeatable attribution", "score": 4 } ], "defaultNextPageId": "q18_gov_cadence" }, { "type": "question", "id": 
"q18_gov_cadence", "dimensionId": "operating_system", "prompt": "How often are customer education outcomes reviewed with leadership (with decisions made)?", "options": [ { "id": "a", "label": "Never / only when issues arise", "score": 1 }, { "id": "b", "label": "Annually", "score": 2 }, { "id": "c", "label": "Quarterly", "score": 3 }, { "id": "d", "label": "Monthly (or more) with a defined decision cadence", "score": 4 } ], "defaultNextPageId": "n2" }, { "type": "narrative", "id": "n2", "title": "Now the AI operating model", "body": ["Next we’ll go deeper on AI practices.", "Your path depends on how AI is used today."], "defaultNextPageId": "q19_ai_gateway" }, /* ---------- AI GATEWAY (Question 19) ---------- */ { "type": "question", "id": "q19_ai_gateway", "dimensionId": "ai_workflow", "prompt": "How is AI used in customer education operations today?", "options": [ { "id": "a", "label": "Little to no AI use", "score": 1, "nextPageId": "aiF_01_policy" }, { "id": "b", "label": "Individuals use AI for drafting and content tasks", "score": 2, "nextPageId": "aiC_01_standards" }, { "id": "c", "label": "AI supports standardized workflows (tagging, updates, QA)", "score": 3, "nextPageId": "aiW_01_workflows" }, { "id": "d", "label": "AI triggers actions using telemetry (routing, next-best interventions)", "score": 4, "nextPageId": "aiA_01_agents" } ] }, /* ---------- BRANCH 1: AI FOUNDATIONS (Questions 20–25) ---------- */ { "type": "question", "id": "aiF_01_policy", "dimensionId": "operating_system", "prompt": "Which statement best describes your AI policy for customer education work?", "options": [ { "id": "a", "label": "No policy; individuals decide", "score": 1 }, { "id": "b", "label": "Informal guidance, inconsistent adoption", "score": 2 }, { "id": "c", "label": "Documented policy for approved tools and use cases", "score": 3 }, { "id": "d", "label": "Policy + governance workflow + periodic review", "score": 4 } ], "defaultNextPageId": "aiF_02_tools" }, { "type": 
"question", "id": "aiF_02_tools", "dimensionId": "operating_system", "prompt": "Which best describes your access to approved AI tools?", "options": [ { "id": "a", "label": "No approved tools", "score": 1 }, { "id": "b", "label": "Some tools, mostly personal accounts", "score": 2 }, { "id": "c", "label": "Approved enterprise tools for key roles", "score": 3 }, { "id": "d", "label": "Standard toolchain with role-based access and logs", "score": 4 } ], "defaultNextPageId": "aiF_03_data_access" }, { "type": "question", "id": "aiF_03_data_access", "dimensionId": "operating_system", "prompt": "How accessible is the data needed for meaningful AI-assisted education (usage, support, CRM)?", "options": [ { "id": "a", "label": "Mostly inaccessible", "score": 1 }, { "id": "b", "label": "Accessible via manual exports", "score": 2 }, { "id": "c", "label": "Accessible via dashboards / limited integrations", "score": 3 }, { "id": "d", "label": "Accessible via governed pipelines and APIs", "score": 4 } ], "defaultNextPageId": "aiF_04_skills" }, { "type": "question", "id": "aiF_04_skills", "dimensionId": "operating_system", "prompt": "Who can build or maintain AI-assisted workflows in your program?", "options": [ { "id": "a", "label": "No one (not a capability today)", "score": 1 }, { "id": "b", "label": "A few individuals experimenting", "score": 2 }, { "id": "c", "label": "Defined owner(s) with time allocated", "score": 3 }, { "id": "d", "label": "Shared capability with standards + enablement", "score": 4 } ], "defaultNextPageId": "aiF_05_first_use_case" }, { "type": "question", "id": "aiF_05_first_use_case", "dimensionId": "ai_workflow", "prompt": "What is your most realistic first AI use case in the next 90 days?", "options": [ { "id": "a", "label": "Draft content faster", "score": 1 }, { "id": "b", "label": "Tag and organize existing content", "score": 2 }, { "id": "c", "label": "Detect changes and create update tasks", "score": 3 }, { "id": "d", "label": "Trigger targeted 
interventions from signals", "score": 4 } ], "defaultNextPageId": "q26_rejoin_ai_impact" }, { "type": "question", "id": "aiF_06_readiness_barrier", "dimensionId": "operating_system", "prompt": "What most limits AI adoption for your program today?", "options": [ { "id": "a", "label": "Risk/compliance concerns", "score": 1 }, { "id": "b", "label": "Tooling access and cost", "score": 2 }, { "id": "c", "label": "Data availability and integration", "score": 3 }, { "id": "d", "label": "Operating model and governance (ownership/cadence)", "score": 4 } ], "defaultNextPageId": "q26_rejoin_ai_impact" }, /* ---------- BRANCH 2: AI CONTENT OPS (Questions 20–25) ---------- */ { "type": "question", "id": "aiC_01_standards", "dimensionId": "knowledge_architecture", "prompt": "How standardized are your prompts, templates, and content patterns for AI-assisted authoring?", "options": [ { "id": "a", "label": "Not standardized", "score": 1 }, { "id": "b", "label": "Some shared examples, inconsistent use", "score": 2 }, { "id": "c", "label": "Standard templates for common outputs", "score": 3 }, { "id": "d", "label": "Template library with governance and iteration", "score": 4 } ], "defaultNextPageId": "aiC_02_taxonomy" }, { "type": "question", "id": "aiC_02_taxonomy", "dimensionId": "knowledge_architecture", "prompt": "How consistent is your taxonomy (personas, workflows, features) across assets?", "options": [ { "id": "a", "label": "Inconsistent or missing", "score": 1 }, { "id": "b", "label": "Basic taxonomy, applied unevenly", "score": 2 }, { "id": "c", "label": "Defined taxonomy with periodic cleanup", "score": 3 }, { "id": "d", "label": "Governed taxonomy enforced in workflows", "score": 4 } ], "defaultNextPageId": "aiC_03_qa" }, { "type": "question", "id": "aiC_03_qa", "dimensionId": "operating_system", "prompt": "Which best describes QA for AI-assisted content?", "options": [ { "id": "a", "label": "No consistent QA gates", "score": 1 }, { "id": "b", "label": "Manual review 
happens inconsistently", "score": 2 }, { "id": "c", "label": "Defined review steps for key assets", "score": 3 }, { "id": "d", "label": "Governed QA + evaluation + audit trail", "score": 4 } ], "defaultNextPageId": "aiC_04_change_detection" }, { "type": "question", "id": "aiC_04_change_detection", "dimensionId": "knowledge_architecture", "prompt": "When product changes ship, how do you detect and route content updates?", "options": [ { "id": "a", "label": "Someone notices; updates lag", "score": 1 }, { "id": "b", "label": "Release notes prompt manual updates", "score": 2 }, { "id": "c", "label": "Checklist-based update workflow", "score": 3 }, { "id": "d", "label": "Automated change detection + governed update pipeline", "score": 4 } ], "defaultNextPageId": "aiC_05_reuse" }, { "type": "question", "id": "aiC_05_reuse", "dimensionId": "knowledge_architecture", "prompt": "How modular and reusable is your content for AI-assisted assembly (micro-content, blocks, single-source)?", "options": [ { "id": "a", "label": "Mostly monolithic assets", "score": 1 }, { "id": "b", "label": "Some reuse, mostly manual", "score": 2 }, { "id": "c", "label": "Modular by design for common patterns", "score": 3 }, { "id": "d", "label": "Single-source + composable content used across channels", "score": 4 } ], "defaultNextPageId": "aiC_06_localization" }, { "type": "question", "id": "aiC_06_localization", "dimensionId": "operating_system", "prompt": "How governed is AI-assisted localization (terminology, accuracy, review)?", "options": [ { "id": "a", "label": "Not governed / not supported", "score": 1 }, { "id": "b", "label": "Ad hoc translation, inconsistent terminology", "score": 2 }, { "id": "c", "label": "Terminology list + review for key content", "score": 3 }, { "id": "d", "label": "Workflow with terminology governance + QA metrics", "score": 4 } ], "defaultNextPageId": "q26_rejoin_ai_impact" }, /* ---------- BRANCH 3: AI WORKFLOW AUTOMATION (Questions 20–25) ---------- */ { "type": 
"question", "id": "aiW_01_workflows", "dimensionId": "ai_workflow", "prompt": "How standardized are your AI-assisted workflows (tagging, QA, updates, recommendations)?", "options": [ { "id": "a", "label": "Not standardized", "score": 1 }, { "id": "b", "label": "Some repeatable steps, varies by person", "score": 2 }, { "id": "c", "label": "Standard workflows for priority processes", "score": 3 }, { "id": "d", "label": "Workflow library with governance + telemetry", "score": 4 } ], "defaultNextPageId": "aiW_02_routing" }, { "type": "question", "id": "aiW_02_routing", "dimensionId": "gtm_orchestration", "prompt": "How do learning signals route actions across teams (CS, Support, Product, Sales)?", "options": [ { "id": "a", "label": "They do not route actions", "score": 1 }, { "id": "b", "label": "Manual routing via messages and meetings", "score": 2 }, { "id": "c", "label": "Routing exists for select segments/use cases", "score": 3 }, { "id": "d", "label": "Automated routing with defined triggers and owners", "score": 4 } ], "defaultNextPageId": "aiW_03_interventions" }, { "type": "question", "id": "aiW_03_interventions", "dimensionId": "flywheel_signals", "prompt": "Which statement best describes how interventions are triggered?", "options": [ { "id": "a", "label": "Primarily manual / reactive", "score": 1 }, { "id": "b", "label": "Triggered by scheduled lifecycle moments", "score": 2 }, { "id": "c", "label": "Triggered by a small set of signals", "score": 3 }, { "id": "d", "label": "Triggered by signals with measurement of outcomes", "score": 4 } ], "defaultNextPageId": "aiW_04_evaluation" }, { "type": "question", "id": "aiW_04_evaluation", "dimensionId": "operating_system", "prompt": "How do you evaluate whether AI-assisted workflows improve outcomes (not just output)?", "options": [ { "id": "a", "label": "We don’t evaluate", "score": 1 }, { "id": "b", "label": "Anecdotal feedback", "score": 2 }, { "id": "c", "label": "Basic before/after metrics for select 
workflows", "score": 3 }, { "id": "d", "label": "Defined evaluation plan with repeatable measurement", "score": 4 } ], "defaultNextPageId": "aiW_05_data_pipelines" }, { "type": "question", "id": "aiW_05_data_pipelines", "dimensionId": "operating_system", "prompt": "How mature are your data pipelines for education telemetry (events, identity, joins)?", "options": [ { "id": "a", "label": "No reliable pipeline", "score": 1 }, { "id": "b", "label": "Partial / manual pipeline", "score": 2 }, { "id": "c", "label": "Reliable pipeline for core sources", "score": 3 }, { "id": "d", "label": "Governed pipeline with event standards and quality checks", "score": 4 } ], "defaultNextPageId": "aiW_06_observability" }, { "type": "question", "id": "aiW_06_observability", "dimensionId": "operating_system", "prompt": "How observable are AI workflow decisions (why it triggered, what it used, who approved)?", "options": [ { "id": "a", "label": "Not observable", "score": 1 }, { "id": "b", "label": "Partially observable in scattered logs", "score": 2 }, { "id": "c", "label": "Observable for key workflows and incidents", "score": 3 }, { "id": "d", "label": "Observable by design with audit trail and dashboards", "score": 4 } ], "defaultNextPageId": "q26_rejoin_ai_impact" }, /* ---------- BRANCH 4: AGENTIC AI (Questions 20–25) ---------- */ { "type": "question", "id": "aiA_01_agents", "dimensionId": "ai_workflow", "prompt": "Which statement best describes your use of AI agents (autonomous or semi-autonomous workflows)?", "options": [ { "id": "a", "label": "No agent-like workflows", "score": 1 }, { "id": "b", "label": "Prototypes exist, not in production", "score": 2 }, { "id": "c", "label": "Agents support specific workflows with human approval", "score": 3 }, { "id": "d", "label": "Agents run key loops with governance and controls", "score": 4 } ], "defaultNextPageId": "aiA_02_next_best_action" }, { "type": "question", "id": "aiA_02_next_best_action", "dimensionId": "personalization", 
"prompt": "How are “next-best learning actions” generated for accounts or users?", "options": [ { "id": "a", "label": "Not generated", "score": 1 }, { "id": "b", "label": "Manual recommendations by CS/education", "score": 2 }, { "id": "c", "label": "Rules-based recommendations for segments", "score": 3 }, { "id": "d", "label": "AI recommendations using signals + feedback loops", "score": 4 } ], "defaultNextPageId": "aiA_03_guardrails" }, { "type": "question", "id": "aiA_03_guardrails", "dimensionId": "operating_system", "prompt": "Which statement best describes your guardrails for agent actions?", "options": [ { "id": "a", "label": "Few or no guardrails", "score": 1 }, { "id": "b", "label": "Manual approvals sometimes required", "score": 2 }, { "id": "c", "label": "Defined thresholds + approvals for sensitive actions", "score": 3 }, { "id": "d", "label": "Policy-driven guardrails + audit trail + escalation paths", "score": 4 } ], "defaultNextPageId": "aiA_04_evals" }, { "type": "question", "id": "aiA_04_evals", "dimensionId": "operating_system", "prompt": "How do you evaluate agent performance (accuracy, safety, usefulness) over time?", "options": [ { "id": "a", "label": "We do not evaluate systematically", "score": 1 }, { "id": "b", "label": "Spot checks when issues arise", "score": 2 }, { "id": "c", "label": "Regular evaluation for key workflows", "score": 3 }, { "id": "d", "label": "Automated evals + regression checks + dashboards", "score": 4 } ], "defaultNextPageId": "aiA_05_orchestration" }, { "type": "question", "id": "aiA_05_orchestration", "dimensionId": "gtm_orchestration", "prompt": "How well do agent-driven interventions coordinate across CS/support/product workflows?", "options": [ { "id": "a", "label": "Not coordinated", "score": 1 }, { "id": "b", "label": "Coordinated informally", "score": 2 }, { "id": "c", "label": "Coordinated for select plays/segments", "score": 3 }, { "id": "d", "label": "Coordinated through governed playbooks and routing", 
"score": 4 } ], "defaultNextPageId": "aiA_06_observability" }, { "type": "question", "id": "aiA_06_observability", "dimensionId": "operating_system", "prompt": "How observable are agent decisions (inputs used, reasoning summary, outcomes)?", "options": [ { "id": "a", "label": "Not observable", "score": 1 }, { "id": "b", "label": "Partially observable through logs", "score": 2 }, { "id": "c", "label": "Observable for key workflows", "score": 3 }, { "id": "d", "label": "Observable by design with audits and incident playbooks", "score": 4 } ], "defaultNextPageId": "q26_rejoin_ai_impact" }, /* ---------- REJOIN (Question 26) ---------- */ { "type": "question", "id": "q26_rejoin_ai_impact", "dimensionId": "outcome_governance", "prompt": "How has AI changed your customer education operating model so far?", "options": [ { "id": "a", "label": "It hasn’t changed", "score": 1 }, { "id": "b", "label": "Some change, mostly incremental", "score": 2 }, { "id": "c", "label": "Significant change to how we design and deliver", "score": 3 }, { "id": "d", "label": "Fundamental shift in how we run the program", "score": 4 } ], "defaultNextPageId": "n3" }, { "type": "narrative", "id": "n3", "title": "Back to the flywheel", "body": ["Next we’ll measure the loop: signals → interventions → outcomes.", "This is where systems become business impact."], "defaultNextPageId": "q27_outcomes_tied" }, /* ---------- POST-AI CORE (Questions 27–40) ---------- */ { "type": "question", "id": "q27_outcomes_tied", "dimensionId": "outcome_governance", "prompt": "Which statement best describes how outcomes are tied to education?", "options": [ { "id": "a", "label": "Outcomes are not formally tied", "score": 1 }, { "id": "b", "label": "Tied to a few metrics, mostly engagement", "score": 2 }, { "id": "c", "label": "Tied to adoption/TTFV/retention for priority segments", "score": 3 }, { "id": "d", "label": "Tied to defined behavior and business outcomes with iteration", "score": 4 } ], "defaultNextPageId": 
"q28_measurement_constraints" }, { "type": "question", "id": "q28_measurement_constraints", "dimensionId": "operating_system", "prompt": "What best describes your biggest limitation in measuring downstream impact?", "options": [ { "id": "a", "label": "Unclear success definitions", "score": 1 }, { "id": "b", "label": "Disconnected systems", "score": 2 }, { "id": "c", "label": "Limited access to end-user/product data", "score": 3 }, { "id": "d", "label": "We can measure impact reliably for priority use cases", "score": 4 } ], "defaultNextPageId": "q29_participation_driver" }, { "type": "question", "id": "q29_participation_driver", "dimensionId": "flywheel_signals", "prompt": "What most reliably drives customer participation in learning?", "options": [ { "id": "a", "label": "Marketing/awareness campaigns", "score": 1 }, { "id": "b", "label": "Customer success recommendation", "score": 2 }, { "id": "c", "label": "Immediate problem solving / moment of need", "score": 3 }, { "id": "d", "label": "Signals-based engagement embedded in workflows", "score": 4 } ], "defaultNextPageId": "q30_inactive_strategy" }, { "type": "question", "id": "q30_inactive_strategy", "dimensionId": "flywheel_signals", "prompt": "How do you engage inactive or non-participating customers?", "options": [ { "id": "a", "label": "No specific strategy", "score": 1 }, { "id": "b", "label": "Reminder messages or campaigns", "score": 2 }, { "id": "c", "label": "CS outreach for select accounts", "score": 3 }, { "id": "d", "label": "Triggered re-engagement based on signals and risk", "score": 4 } ], "defaultNextPageId": "q31_cert_strategy" }, { "type": "question", "id": "q31_cert_strategy", "dimensionId": "outcome_governance", "prompt": "How does your program approach certifications?", "options": [ { "id": "a", "label": "We do not offer certifications", "score": 1 }, { "id": "b", "label": "We offer a single certification", "score": 2 }, { "id": "c", "label": "We offer multiple or tiered certifications", 
"score": 3 }, { "id": "d", "label": "Certifications are tied to role proficiency and outcomes", "score": 4 } ], "defaultNextPageId": "q32_practice_in_product" }, { "type": "question", "id": "q32_practice_in_product", "dimensionId": "personalization", "prompt": "How embedded is learning in the product or workflow (in-app, contextual, moment-of-need)?", "options": [ { "id": "a", "label": "Not embedded", "score": 1 }, { "id": "b", "label": "Occasional links or help articles", "score": 2 }, { "id": "c", "label": "Embedded for key workflows", "score": 3 }, { "id": "d", "label": "Embedded + personalized based on user context", "score": 4 } ], "defaultNextPageId": "q33_persona_recs" }, { "type": "question", "id": "q33_persona_recs", "dimensionId": "personalization", "prompt": "How targeted are recommendations by persona, role, or workflow?", "options": [ { "id": "a", "label": "Not targeted", "score": 1 }, { "id": "b", "label": "Broad segments only", "score": 2 }, { "id": "c", "label": "Role-based recommendations for priority personas", "score": 3 }, { "id": "d", "label": "Personalized recommendations using signals + feedback", "score": 4 } ], "defaultNextPageId": "q34_content_contributors" }, { "type": "question", "id": "q34_content_contributors", "dimensionId": "gtm_orchestration", "prompt": "Which statement best describes cross-functional content contribution (Product, Support, CS, Marketing)?", "options": [ { "id": "a", "label": "Contributions are rare and ad hoc", "score": 1 }, { "id": "b", "label": "Contributions occur but are hard to coordinate", "score": 2 }, { "id": "c", "label": "Defined contribution process for key groups", "score": 3 }, { "id": "d", "label": "Governed contribution pipeline with standards and ownership", "score": 4 } ], "defaultNextPageId": "q35_support_ticket_loop" }, { "type": "question", "id": "q35_support_ticket_loop", "dimensionId": "flywheel_signals", "prompt": "How often do support tickets or known issues feed education updates and 
interventions?", "options": [ { "id": "a", "label": "Rarely or never", "score": 1 }, { "id": "b", "label": "Occasionally, based on severity", "score": 2 }, { "id": "c", "label": "Regularly for top drivers", "score": 3 }, { "id": "d", "label": "Routed automatically with measurement of impact", "score": 4 } ], "defaultNextPageId": "q36_product_release_loop" }, { "type": "question", "id": "q36_product_release_loop", "dimensionId": "knowledge_architecture", "prompt": "How consistently does education keep pace with product releases?", "options": [ { "id": "a", "label": "Often lags behind", "score": 1 }, { "id": "b", "label": "Catches up later for major releases", "score": 2 }, { "id": "c", "label": "Tracks releases for priority features", "score": 3 }, { "id": "d", "label": "Release-to-education loop is governed and repeatable", "score": 4 } ], "defaultNextPageId": "q37_account_visibility" }, { "type": "question", "id": "q37_account_visibility", "dimensionId": "flywheel_signals", "prompt": "How visible is learning progress and readiness at the account level for CS leadership?", "options": [ { "id": "a", "label": "Not visible", "score": 1 }, { "id": "b", "label": "Visible in LMS reports only", "score": 2 }, { "id": "c", "label": "Visible via dashboards that combine multiple sources", "score": 3 }, { "id": "d", "label": "Visible + used to drive plays and forecasts", "score": 4 } ], "defaultNextPageId": "q38_experimentation" }, { "type": "question", "id": "q38_experimentation", "dimensionId": "outcome_governance", "prompt": "How often do you run structured experiments (A/B, before/after) on education interventions?", "options": [ { "id": "a", "label": "Never", "score": 1 }, { "id": "b", "label": "Rarely (when urgent)", "score": 2 }, { "id": "c", "label": "Sometimes for priority initiatives", "score": 3 }, { "id": "d", "label": "Regularly with a repeatable evaluation practice", "score": 4 } ], "defaultNextPageId": "q39_scaling_constraint" }, { "type": "question", "id": 
"q39_scaling_constraint", "dimensionId": "operating_system", "prompt": "What most limits your ability to scale customer education today?", "options": [ { "id": "a", "label": "Team capacity / bandwidth", "score": 1 }, { "id": "b", "label": "Internal alignment and ownership", "score": 2 }, { "id": "c", "label": "Data visibility and systems", "score": 3 }, { "id": "d", "label": "We can scale through systems, reuse, and governed workflows", "score": 4 } ], "defaultNextPageId": "q40_program_stage" }, { "type": "question", "id": "q40_program_stage", "dimensionId": "operating_system", "prompt": "Which statement best describes your customer education program stage today?", "options": [ { "id": "a", "label": "Launching or early stage", "score": 1 }, { "id": "b", "label": "Scaling and formalizing", "score": 2 }, { "id": "c", "label": "Mature and continuously optimized", "score": 3 }, { "id": "d", "label": "Rebuilding with a governed operating model", "score": 4 } ], "defaultNextPageId": "outro" }, { "type": "outro", "id": "outro" } ], "scoring": { "dimensionQuestions": { "outcome_governance": [ "q01_leadership_view", "q07_value_priority", "q16_measurement_usage", "q17_roi_confidence", "q26_rejoin_ai_impact", "q27_outcomes_tied", "q31_cert_strategy", "q38_experimentation" ], "flywheel_signals": [ "q12_signal_definition", "q13_engagement_trigger", "q14_proactivity", "q29_participation_driver", "q30_inactive_strategy", "q35_support_ticket_loop", "q37_account_visibility" ], "ai_workflow": [ "q19_ai_gateway", "aiF_05_first_use_case", "aiW_01_workflows", "aiA_01_agents" ], "knowledge_architecture": [ "q08_design_approach", "q09_content_refresh", "aiC_01_standards", "aiC_02_taxonomy", "aiC_04_change_detection", "aiC_05_reuse", "q36_product_release_loop" ], "gtm_orchestration": [ "q06_lifecycle_focus", "q15_crossfunctional_engagement", "aiW_02_routing", "aiA_05_orchestration", "q34_content_contributors" ], "personalization": [ "aiA_02_next_best_action", "q32_practice_in_product", 
"q33_persona_recs" ], "operating_system": [ "q02_exec_sponsor", "q03_funding_model", "q04_team_size", "q05_strategy_owner", "q10_systems_in_experience", "q11_integration_depth", "q18_gov_cadence", "aiF_01_policy", "aiF_02_tools", "aiF_03_data_access", "aiF_04_skills", "aiF_06_readiness_barrier", "aiC_03_qa", "aiC_06_localization", "aiW_04_evaluation", "aiW_05_data_pipelines", "aiW_06_observability", "aiA_03_guardrails", "aiA_04_evals", "aiA_06_observability", "q28_measurement_constraints", "q39_scaling_constraint", "q40_program_stage" ] }, "derivedIndicators": [ { "id": "flywheel_signal_strength", "label": "Flywheel Signal Strength", "questionIds": [ "q12_signal_definition", "q13_engagement_trigger", "q14_proactivity", "q37_account_visibility", "q35_support_ticket_loop" ] }, { "id": "agentic_readiness_index", "label": "Agentic Readiness Index", "questionIds": [ "q19_ai_gateway", "q11_integration_depth", "aiW_05_data_pipelines", "aiA_03_guardrails", "aiA_04_evals" ] }, { "id": "agentic_depth_index", "label": "Agentic Depth Index", "questionIds": [ "aiA_01_agents", "aiA_02_next_best_action", "aiA_03_guardrails", "aiA_04_evals", "aiA_06_observability" ] } ] }, "routingNotes": { "rules": [ "If an option has nextPageId, navigate there.", "Else if the page has defaultNextPageId, navigate there.", "Else navigate to the next page in pages[] order." ], "branchRejoinPageId": "q26_rejoin_ai_impact" } }