<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="stratml_AI_Highlight.xsl"?>
<StrategicPlan xmlns="urn:ISO:std:iso:17469:tech:xsd:stratml_core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
  <Name>AI Governance Definition &amp; Accountability Framework</Name>
  <Description>A plan to establish a unified definition of AI governance grounded in decision authority, accountability, and enforceable oversight mechanisms.</Description>
  <OtherInformation>Submitter&apos;s Note: This StratML rendition has been inferred by ChatGPT from Basil C. Puglisi&apos;s article &quot;AI Governance Has No Formal Definition. That Is a Problem for Every Organization Using AI.&quot; It has been lightly edited using the form at https://stratml.us/forms/Claude/Part1.html</OtherInformation>
  <StrategicPlanCore>
    <Organization>
      <Name>AI Governance Ecosystem</Name>
      <Acronym>AIGE</Acronym>
      <Identifier>8b7b0f85-eb33-49e3-9a80-9b952e3ba71e</Identifier>
      <Description>Institutions, organizations, and individuals responsible for defining, implementing, and enforcing AI governance.</Description>
      <Stakeholder StakeholderTypeType="Person">
        <Name>Basil C. Puglisi</Name>
        <Description>Author proposing a formal definition of AI governance grounded in accountability and decision authority.</Description>
      </Stakeholder>
      <Stakeholder StakeholderTypeType="Generic_Group">
        <Name>Standards Organizations</Name>
        <Description>Entities such as ISO responsible for governance and management system standards.</Description>
      </Stakeholder>
      <Stakeholder StakeholderTypeType="Generic_Group">
        <Name>Regulators</Name>
        <Description>Authorities establishing binding obligations for AI systems.</Description>
      </Stakeholder>
      <Stakeholder StakeholderTypeType="Generic_Group">
        <Name>Organizations</Name>
        <Description>Enterprises implementing AI systems and governance structures.</Description>
      </Stakeholder>
      <Stakeholder StakeholderTypeType="Generic_Group">
        <Name>Accountable Individuals</Name>
        <Description>Named humans holding decision authority and accountability for AI outputs.</Description>
      </Stakeholder>
    </Organization>
    <Vision>
      <Description>Clearly defined AI governance enabling accountable human oversight of AI systems across all institutional contexts.</Description>
      <Identifier>df31dc18-dbfb-4775-a7be-16ff38d0dec4</Identifier>
    </Vision>
    <Mission>
      <Description>To establish a unified definition of AI governance grounded in decision authority, accountability, and enforceable oversight mechanisms.</Description>
      <Identifier>4e10a3c7-d7aa-416a-938e-cbfe296a2c42</Identifier>
    </Mission>
    <Value>
      <Name>Accountability</Name>
      <Description>Ensure identifiable individuals are responsible for AI decisions and outcomes.</Description>
    </Value>
    <Value>
      <Name>Authority</Name>
      <Description>Assign decision-making power to qualified individuals with the ability to approve, modify, or halt AI outputs.</Description>
    </Value>
    <Value>
      <Name>Transparency</Name>
      <Description>Enable traceability and auditability of decisions affecting AI outputs.</Description>
    </Value>
    <Value>
      <Name>Responsibility</Name>
      <Description>Align AI use with ethical, legal, and societal expectations.</Description>
    </Value>
    <Value>
      <Name>Clarity</Name>
      <Description>Define governance concepts unambiguously across institutional contexts.</Description>
    </Value>
    <Goal>
      <Name>Definition</Name>
      <Description>Establish a clear and unified definition of AI governance across institutions and frameworks.</Description>
      <Identifier>4d647729-eef9-44e2-985d-fd068ba8acf9</Identifier>
      <SequenceIndicator>1</SequenceIndicator>
      <OtherInformation>None of the major institutional frameworks define AI governance explicitly.</OtherInformation>
      <Objective>
        <Name>Conceptual Definition</Name>
        <Description>Define AI governance as decision authority, accountability structures, and oversight mechanisms exercised by named humans.</Description>
        <Identifier>eab849ab-a045-45e4-af62-f2dfdd39e798</Identifier>
        <SequenceIndicator>1.1</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Applied Definition</Name>
        <Description>Specify governance as the presence of a qualified human with binding authority and accountability at defined checkpoints.</Description>
        <Identifier>34775da4-ea95-4e3d-9c03-132b92680cd3</Identifier>
        <SequenceIndicator>1.2</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Framework Alignment</Name>
      <Description>Integrate governance concepts across institutional frameworks and standards.</Description>
      <Identifier>6e6578d5-c685-45f2-897a-1ab137e0784e</Identifier>
      <SequenceIndicator>2</SequenceIndicator>
      <OtherInformation>ISO, NIST, OECD, EU AI Act, and other frameworks provide partial but non-integrated perspectives.</OtherInformation>
      <Objective>
        <Name>Accountability Structures</Name>
        <Description>Incorporate accountability and decision authority as defined in organizational governance standards.</Description>
        <Identifier>3efc5de0-ae88-4b2d-85a3-487603dc46c8</Identifier>
        <SequenceIndicator>2.1</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Human Oversight</Name>
        <Description>Ensure alignment with principles requiring human responsibility and oversight of AI systems.</Description>
        <Identifier>6fb599f4-a9b5-445e-8eca-2198aed0c123</Identifier>
        <SequenceIndicator>2.2</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Risk Posture</Name>
        <Description>Embed governance within organizational culture and risk management practices.</Description>
        <Identifier>b42a02d7-5faf-4fe6-a59d-abdca21e3e20</Identifier>
        <SequenceIndicator>2.3</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Accountability</Name>
      <Description>Ensure governance includes enforceable accountability for AI outputs.</Description>
      <Identifier>c32aa603-32c6-4126-b335-95a04a1f3ba5</Identifier>
      <SequenceIndicator>3</SequenceIndicator>
      <OtherInformation>Governance requires a named human whose decisions can be audited and held accountable.</OtherInformation>
      <Objective>
        <Name>Decision Authority</Name>
        <Description>Assign binding authority to individuals to approve, modify, or halt AI outputs.</Description>
        <Identifier>8ae77216-11b2-4cb7-bb06-cd04a2569166</Identifier>
        <SequenceIndicator>3.1</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Auditability</Name>
        <Description>Enable tracing and review of decisions affecting AI outputs.</Description>
        <Identifier>9e0a3e87-bbca-48e4-8ebb-913e238939a6</Identifier>
        <SequenceIndicator>3.2</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Liability Channels</Name>
        <Description>Ensure accountability through moral, employment, civil, and criminal mechanisms.</Description>
        <Identifier>eaef4394-8816-4133-94b4-bdd58cd05192</Identifier>
        <SequenceIndicator>3.3</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Distinction</Name>
      <Description>Differentiate AI governance from responsible AI and management systems.</Description>
      <Identifier>5fd7ece3-68b8-4950-a5ad-5f90a077c7cd</Identifier>
      <SequenceIndicator>4</SequenceIndicator>
      <OtherInformation>Responsible AI focuses on engineering; governance requires accountable human authority.</OtherInformation>
      <Objective>
        <Name>Responsible AI</Name>
        <Description>Define responsible AI as engineering practices embedding ethical principles into systems.</Description>
        <Identifier>4ada199d-f0d5-4652-850f-28fb751b19ec</Identifier>
        <SequenceIndicator>4.1</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Governance Threshold</Name>
        <Description>Establish the requirement of named human accountability as the threshold for governance.</Description>
        <Identifier>68896da0-e240-463a-8024-3301c4dc3922</Identifier>
        <SequenceIndicator>4.2</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Operational Test</Name>
      <Description>Provide criteria for determining whether AI governance exists in practice.</Description>
      <Identifier>8e5dd74f-bc24-4ee6-acb6-66f6cb9ddadc</Identifier>
      <SequenceIndicator>5</SequenceIndicator>
      <Objective>
        <Name>Named Individual</Name>
        <Description>Identify a specific person with authority over AI outputs.</Description>
        <Identifier>595d80c1-6d8d-48cb-8c38-be22af7d240e</Identifier>
        <SequenceIndicator>5.1</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Traceability</Name>
        <Description>Ensure decisions can be documented and reviewed.</Description>
        <Identifier>eb46db45-4091-4eea-94a3-67035ec2b5da</Identifier>
        <SequenceIndicator>5.2</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Accountability Channels</Name>
        <Description>Confirm applicability of moral, employment, civil, or criminal accountability.</Description>
        <Identifier>8ac55d05-ec1f-499e-8444-e224d192be24</Identifier>
        <SequenceIndicator>5.3</SequenceIndicator>
      </Objective>
    </Goal>
  </StrategicPlanCore>
  <AdministrativeInformation>
    <StartDate>2026-03-15</StartDate>
    <PublicationDate>2026-03-18</PublicationDate>
    <Source>https://basilpuglisi.com/ai-governance-has-no-formal-definition-here-is-one/</Source>
    <Submitter>
      <GivenName>Owen</GivenName>
      <Surname>Ambur</Surname>
      <EmailAddress>Owen.Ambur@verizon.net</EmailAddress>
    </Submitter>
  </AdministrativeInformation>
</StrategicPlan>