The current experimental reprovision feature in SmartOS for native and lx branded zones allows, e.g.:
[root@smartos /opt/vmconfigs]# cat reprovision.json
{
"alias": "reprovision",
"autoboot": false,
"brand": "joyent",
"max_physical_memory": 256,
"quota": 10,
"image_uuid": "e44ed3e0-910b-11ed-a5d4-00151714048c",
"nics": []
}
[root@smartos /opt/vmconfigs]# vmadm create -f reprovision.json
Successfully created VM 5478633e-a0e5-4d48-8952-c322a2ae6885
[root@smartos /opt/vmconfigs]# UUID=$(vmadm list -Ho uuid alias=reprovision)
[root@smartos /opt/vmconfigs]# zfs get origin zones/${UUID?}
NAME PROPERTY VALUE SOURCE
zones/5478633e-a0e5-4d48-8952-c322a2ae6885 origin zones/e44ed3e0-910b-11ed-a5d4-00151714048c@final -
[root@smartos /opt/vmconfigs]# vmadm reprovision ${UUID?}<<<'{"image_uuid":"2f1dc911-6401-4fa4-8e9d-67ea2e39c271"}'
Successfully reprovisioned VM 5478633e-a0e5-4d48-8952-c322a2ae6885
[root@smartos /opt/vmconfigs]# zfs get origin zones/${UUID?}
NAME PROPERTY VALUE SOURCE
zones/5478633e-a0e5-4d48-8952-c322a2ae6885 origin zones/2f1dc911-6401-4fa4-8e9d-67ea2e39c271@final -
We want the same behavior for bhyve, replacing disk0, e.g. (EXAMPLE OUTPUT! DOESN'T WORK YET)
[root@smartos /opt/vmconfigs]# cat reprovision-bhyve.json
{
"alias": "reprovision",
"autoboot": false,
"brand": "bhyve",
"max_physical_memory": 256,
"quota": 10,
"disks": [
{
"boot": true,
"compression": "lz4",
"model": "virtio",
"image_uuid": "1cbf6e26-dcfb-4a10-b982-0dedde078bf7"
},
{
"size": 1024,
"compression": "lz4",
"model": "virtio"
}
],
"nics": []
}
[root@smartos /opt/vmconfigs]# vmadm create -f reprovision-bhyve.json
Successfully created VM 16990c37-e714-459b-acdf-921a05daee72
[root@smartos /opt/vmconfigs]# vmadm get ${UUID?} | json disks[0].image_uuid
1cbf6e26-dcfb-4a10-b982-0dedde078bf7
[root@smartos /opt/vmconfigs]# zfs get -r origin zones/${UUID?}
NAME PROPERTY VALUE SOURCE
zones/16990c37-e714-459b-acdf-921a05daee72 origin - -
zones/16990c37-e714-459b-acdf-921a05daee72/disk0 origin zones/1cbf6e26-dcfb-4a10-b982-0dedde078bf7@final -
zones/16990c37-e714-459b-acdf-921a05daee72/disk1 origin - -
[root@smartos /opt/vmconfigs]# vmadm reprovision ${UUID?}<<<'{"image_uuid":"de6c5581-1779-43b4-8925-aca22dd0980b"}'
#ACTUAL OUTPUT: Failed to reprovision VM 16990c37-e714-459b-acdf-921a05daee72: brand "bhyve" does not yet support reprovision
#DESIRED RESULT:
[root@smartos /opt/vmconfigs]# zfs get -r origin zones/${UUID?}
NAME PROPERTY VALUE SOURCE
zones/16990c37-e714-459b-acdf-921a05daee72 origin - -
zones/16990c37-e714-459b-acdf-921a05daee72/disk0 origin zones/de6c5581-1779-43b4-8925-aca22dd0980b@final -
zones/16990c37-e714-459b-acdf-921a05daee72/disk1 origin - -
Test commands for copy-paste:
vmadm create -f /opt/vmconfigs/reprovision-bhyve.json && \
UUID=$(vmadm list -Ho uuid alias=reprovision) && \
vmadm get ${UUID?} | json disks[0].image_uuid && \
zfs get -r origin zones/${UUID?} && \
vmadm reprovision ${UUID?}<<<'{"image_uuid":"de6c5581-1779-43b4-8925-aca22dd0980b"}'
Nahum Shalman commented on 2025-08-08T15:30:15.035-0400 (edited 2025-08-12T10:17:03.098-0400):
Testing completed:
Attempting to reprovision bhyve zone with something other than a zvol fails without breaking the VM.
Attempting to reprovision native zone with a zvol still behaves as expected and does not break the zone.
non-bhyve reprovision still works
bhyve reprovision works
[root@smartos /opt/vmconfigs]# bash -x reprovision-test.sh
+ set -o xtrace
+ set -o errexit
+ echo '--- Native ---'
--- Native ---
+ vmadm create
Successfully created VM 30fbccab-280c-45a1-aaba-a0da89f04ef4
++ vmadm list -Ho uuid alias=reprovision
+ UUID=30fbccab-280c-45a1-aaba-a0da89f04ef4
+ vmadm list -H uuid=30fbccab-280c-45a1-aaba-a0da89f04ef4
30fbccab-280c-45a1-aaba-a0da89f04ef4 OS 256 stopped reprovision
+ echo 'Original image id:'
Original image id:
+ vmadm get 30fbccab-280c-45a1-aaba-a0da89f04ef4
+ json image_uuid
e44ed3e0-910b-11ed-a5d4-00151714048c
+ echo '{"image_uuid":"2f1dc911-6401-4fa4-8e9d-67ea2e39c271"}'
+ vmadm reprovision 30fbccab-280c-45a1-aaba-a0da89f04ef4
Successfully reprovisioned VM 30fbccab-280c-45a1-aaba-a0da89f04ef4
+ echo 'Reprovisioned image id:'
Reprovisioned image id:
+ vmadm get 30fbccab-280c-45a1-aaba-a0da89f04ef4
+ json image_uuid
2f1dc911-6401-4fa4-8e9d-67ea2e39c271
+ vmadm delete 30fbccab-280c-45a1-aaba-a0da89f04ef4
Successfully deleted VM 30fbccab-280c-45a1-aaba-a0da89f04ef4
+ echo '--- BHYVE ---'
--- BHYVE ---
+ vmadm create
Successfully created VM c8123edc-5744-49fa-b7a0-af46e9170722
++ vmadm list -Ho uuid alias=reprovision
+ UUID=c8123edc-5744-49fa-b7a0-af46e9170722
+ vmadm list -H uuid=c8123edc-5744-49fa-b7a0-af46e9170722
c8123edc-5744-49fa-b7a0-af46e9170722 BHYV 256 stopped reprovision
+ echo 'Original image id:'
Original image id:
+ vmadm get c8123edc-5744-49fa-b7a0-af46e9170722
+ json 'disks[0].image_uuid'
1cbf6e26-dcfb-4a10-b982-0dedde078bf7
+ echo '{"image_uuid":"de6c5581-1779-43b4-8925-aca22dd0980b"}'
+ vmadm reprovision c8123edc-5744-49fa-b7a0-af46e9170722
Successfully reprovisioned VM c8123edc-5744-49fa-b7a0-af46e9170722
+ echo 'Reprovisioned image id:'
Reprovisioned image id:
+ vmadm get c8123edc-5744-49fa-b7a0-af46e9170722
+ json 'disks[0].image_uuid'
de6c5581-1779-43b4-8925-aca22dd0980b
+ vmadm delete c8123edc-5744-49fa-b7a0-af46e9170722
Successfully deleted VM c8123edc-5744-49fa-b7a0-af46e9170722
Nahum Shalman commented on 2025-08-08T16:02:47.802-0400 (edited 2025-08-08T16:03:09.588-0400):
I created a VM using our ubuntu 22.04 image and gave it a second disk which ubuntu helpfully formatted and mounted at /data. I copied /etc/os-release over to /data/before-reprovision and then reprovisioned the VM to the 24.04 image. When I logged in I was able to confirm that the VM was now running 24.04 AND that the file on the data drive had been preserved.
root@preserve:~# grep CODENAME /data/before-reprovision /etc/os-release
/data/before-reprovision:VERSION_CODENAME=jammy
/data/before-reprovision:UBUNTU_CODENAME=jammy
/etc/os-release:VERSION_CODENAME=noble
/etc/os-release:UBUNTU_CODENAME=noble
Nahum Shalman commented on 2025-08-13T09:24:16.339-0400:
New bootability check works:
[root@smartos /opt/vmconfigs]# cat reprovision-bhyve-reject.json
{
"alias": "reprovision",
"autoboot": false,
"brand": "bhyve",
"bootrom": "uefi",
"ram": 1024,
"quota": 10,
"disks": [
{
"compression": "lz4",
"model": "virtio",
"image_uuid": "1cbf6e26-dcfb-4a10-b982-0dedde078bf7"
},
{
"boot": true,
"size": 1024,
"compression": "lz4",
"model": "virtio"
}
],
"nics": []
}
[root@smartos /opt/vmconfigs]# vmadm create -f reprovision-bhyve-reject.json
Successfully created VM 02e59134-e0e9-4f81-8cb7-b13839230ba7
[root@smartos /opt/vmconfigs]# UUID=$(vmadm list -Ho uuid alias=reprovision)
[root@smartos /opt/vmconfigs]# echo '{"image_uuid":"de6c5581-1779-43b4-8925-aca22dd0980b"}' | vmadm reprovision ${UUID?}
Failed to reprovision VM 02e59134-e0e9-4f81-8cb7-b13839230ba7: disk0 found but does not have boot property set to true - reprovision requires a bootable disk0
Nahum Shalman commented on 2025-08-14T10:59:48.445-0400:
And for completeness based on a conversation with Marko, I confirmed that a running VM will not be stopped if an invalid payload is provided.
[root@smartos /opt/vmconfigs]# vmadm list alias=reprovision
UUID TYPE RAM STATE ALIAS
a4eef5b6-3423-4fc1-b944-354d1fcbaf47 BHYV 1024 running reprovision
[root@smartos /opt/vmconfigs]# UUID=$(vmadm list -Ho uuid alias=reprovision)
[root@smartos /opt/vmconfigs]# echo '{"image_uuid":"2f1dc911-6401-4fa4-8e9d-67ea2e39c271"}' | vmadm reprovision ${UUID?}
Failed to reprovision VM a4eef5b6-3423-4fc1-b944-354d1fcbaf47: image 2f1dc911-6401-4fa4-8e9d-67ea2e39c271 is type zone-dataset, but must be one of: ["zvol"]
[root@smartos /opt/vmconfigs]# vmadm list alias=reprovision
UUID TYPE RAM STATE ALIAS
a4eef5b6-3423-4fc1-b944-354d1fcbaf47 BHYV 1024 running reprovision
Nahum Shalman commented on 2025-08-14T11:21:18.551-0400:
And for extra completeness, lx reprovision also remains unbroken:
[root@smartos /opt/vmconfigs]# vmadm create -f reprovision-lx.json
Successfully created VM afde01ba-46ba-4ffe-b861-26ec57518172
[root@smartos /opt/vmconfigs]# zlogin afde01ba-46ba-4ffe-b861-26ec57518172 cat /etc/os-release
PRETTY_NAME="Ubuntu 24.04.1 LTS"
NAME="Ubuntu"
VERSION_ID="24.04"
VERSION="24.04.1 LTS (Noble Numbat)"
VERSION_CODENAME=noble
ID=ubuntu
ID_LIKE=debian
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
UBUNTU_CODENAME=noble
LOGO=ubuntu-logo
[root@smartos /opt/vmconfigs]# UUID=$(vmadm list -Ho uuid alias=reprovision)
[root@smartos /opt/vmconfigs]# echo '{"image_uuid":"82474e53-54f6-40f6-b610-eb0ada63c64c"}' | vmadm reprovision ${UUID?}
Successfully reprovisioned VM afde01ba-46ba-4ffe-b861-26ec57518172
[root@smartos /opt/vmconfigs]# zlogin $UUID cat /etc/os-release
NAME="Void"
ID="void"
PRETTY_NAME="Void Linux"
HOME_URL="https://voidlinux.org/"
DOCUMENTATION_URL="https://docs.voidlinux.org/"
LOGO="void-logo"
ANSI_COLOR="0;38;2;71;128;97"
DISTRIB_ID="void"
Nahum Shalman commented on 2025-08-14T14:14:32.149-0400:
Reuse of the existing codepath means we already respect indestructible_zoneroot:
[root@smartos /opt/vmconfigs]# vmadm create -f reprovision-bhyve-indestructible.json
Successfully created VM a973e84a-ba2b-4b53-952e-ebb5da84abc3
[root@smartos /opt/vmconfigs]# UUID=$(vmadm list -Ho uuid alias=reprovision)
[root@smartos /opt/vmconfigs]# vmadm delete $UUID
Failed to delete VM a973e84a-ba2b-4b53-952e-ebb5da84abc3: indestructible_zoneroot is set, cannot delete
[root@smartos /opt/vmconfigs]# echo '{"image_uuid":"82474e53-54f6-40f6-b610-eb0ada63c64c"}' | vmadm reprovision ${UUID?}
Failed to reprovision VM a973e84a-ba2b-4b53-952e-ebb5da84abc3: indestructible_zoneroot is set, cannot reprovision
Nahum Shalman commented on 2025-08-29T10:49:43.971-0400 (edited 2025-09-03T13:45:32.512-0400):
I had Claude help me do my testing. A few of these tests are a bit redundant, but I want to be done with this.
Test code:
#!/usr/bin/env bash
#
# VM Reprovision Test Suite
#
# This test suite validates the vmadm reprovision functionality for both native zones
# and bhyve VMs, with comprehensive coverage of the decision tree documented in
# reprovision-flowchart.md.
#
# REQUIRED TESTS FOR VALIDATION:
#
# == PRE-BRAND CHECK TESTS (Initial Validation) ==
# 1. Brand Support Validation
# - Test bhyve brand has 'reprovision' feature enabled
# - Test bhyve brand has 'brand_install_script' defined
# - Test native zone brands (joyent, joyent-minimal) work as before
#
# 2. Payload Validation
# - Test missing image_uuid in payload (should fail)
# - Test invalid image_uuid format (should fail)
# - Test valid image_uuid payload (should succeed)
#
# 3. VM Constraints
# - Test indestructible_zoneroot blocking reprovision
#
# 4. VM State Management
# - Test reprovision on running VM (should stop automatically)
# - Test reprovision on stopped VM (should proceed)
# - Test VM stop timeout scenarios with force halt fallback
# - Test provisioning state transition success/failure
#
# == BHYVE-SPECIFIC TESTS ==
# 5. Disk Validation (Critical - New Feature)
# - Test disk0 existence check (must exist)
# - Test disk0 bootable flag check (must be bootable)
# - Test multiple disks scenario (only disk0 should be replaced)
#
# 6. Disk Size Validation (NEW - OS-8683)
# - Test image size == disk0 size (boundary condition - should work)
# - Test image size < disk0 size (should work)
# - Test image size > disk0 size (should fail BEFORE state transition)
# - Test very large image vs small disk (should fail gracefully)
# - Test validateBhyveDiskSizes helper function directly
#
# 7. Image Type Validation
# - Test zvol image type for bhyve (should work)
# - Test zone-dataset image type for bhyve (should fail)
# - Test invalid/missing image type (should fail)
#
# 8. Snapshot Management
# - Test existing @final snapshot (should use it)
# - Test existing @uuid snapshot (should use it)
# - Test missing snapshots (should create new ones)
# - Test snapshot creation failure scenarios
#
# == NATIVE ZONE TESTS ==
# 9. Native Zone Workflow
# - Test zone-dataset image reprovision
# - Test dataset preparation (unmount, rename for backup)
# - Test config directory preservation
# - Test delegated dataset restoration
#
# == ERROR HANDLING TESTS ==
# 10. Pre-Validation Failures
# - Test VM stays in original state when disk size validation fails
# - Test no state transition occurs on validation errors
# - Test proper error messages for each failure type
#
# 11. Mid-Process Failures
# - Test snapshot clone failure handling
# - Test brand install script failures
# - Test disk replacement failures
# - Test cleanup on partial completion
#
# 12. Rollback Scenarios
# - Test VM state restoration after failures
# - Test filesystem cleanup on errors
# - Test proper error propagation to vmadm command
#
# == EDGE CASE TESTS ==
# 13. Autoboot Scenarios
# - Test VMs with autoboot=true (should start after reprovision)
# - Test VMs with autoboot=false (should remain stopped)
#
# 14. Concurrent Operations
# - Test multiple reprovision attempts on same VM (should serialize)
# - Test reprovision during other VM operations
#
# 15. Resource Limits
# - Test very large VMs (quota limits, timeout handling)
# - Test disk space constraints during reprovision
#
# == INTEGRATION TESTS ==
# 16. End-to-End Workflows
# - Test complete native zone reprovision workflow
# - Test complete bhyve reprovision workflow
# - Test VM functionality after reprovision (boot, network, services)
#
# 17. Configuration Preservation
# - Test VM metadata preservation (tags, customer_metadata, etc.)
# - Test network configuration preservation
# - Test bhyve-specific settings (com1/com2, bootrom, etc.)
# - Test disk configuration preservation (non-boot disks)
#
# 18. Image Compatibility Matrix
# - Test same OS family reprovision (Linux -> Linux)
# - Test different versions within family (Ubuntu 20.04 -> 22.04)
# - Test boundary cases for supported transitions
#
# TEST EXECUTION NOTES:
# - Each test should create, modify, and cleanup its own VMs
# - Tests should validate both success and failure paths
# - Error messages should be specific and actionable
# - State should be verified before and after operations
# - Use different image UUIDs and disk sizes to cover size validation
# Abort the suite on the first unexpected failure; the "should fail" cases
# below are wrapped in `if` so errexit does not trip on them.
set -o errexit
# Also catch failures in any stage of a pipeline (e.g. `vmadm get | json`),
# not just the last stage.
set -o pipefail
# Image UUIDs used throughout the suite.  NATIVE_* are zone-dataset images,
# BHYVE_* are zvol images; all are 10G (see the NOTE further down).
readonly NATIVE_UUID1=e44ed3e0-910b-11ed-a5d4-00151714048c
readonly NATIVE_UUID2=2f1dc911-6401-4fa4-8e9d-67ea2e39c271
readonly BHYVE_UUID1=1cbf6e26-dcfb-4a10-b982-0dedde078bf7
readonly BHYVE_UUID2=de6c5581-1779-43b4-8925-aca22dd0980b
# Create a native (joyent-brand) zone from the given image.
# Usage: create_native <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_native() {
  zone_alias=$1
  zone_image=$2
  vmadm create <<EOF
{
"alias": "${zone_alias?}",
"autoboot": false,
"brand": "joyent",
"max_physical_memory": 256,
"quota": 10,
"image_uuid": "${zone_image?}",
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${zone_alias?})
}
# Create a bhyve VM with an image-backed bootable disk0 plus a blank 1G
# data disk.
# Usage: create_bhyve <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_bhyve() {
  vm_alias=$1
  vm_image=$2
  vmadm create <<EOF
{
"alias": "${vm_alias?}",
"autoboot": false,
"brand": "bhyve",
"bootrom": "uefi",
"ram": 1024,
"quota": 10,
"disks": [
{
"boot": true,
"compression": "lz4",
"model": "virtio",
"image_uuid": "${vm_image?}"
},
{
"size": 1024,
"compression": "lz4",
"model": "virtio"
}
],
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${vm_alias?})
}
# Reprovision a VM to a new image by feeding vmadm a one-line JSON payload.
# Usage: reprovision <vm_uuid> <new_image_uuid>
reprovision() {
  UUID=$1
  NEW_UUID=$2
  # Here-string delivers the same newline-terminated payload the original
  # echo|pipe produced.
  vmadm reprovision ${UUID?} <<<"{\"image_uuid\":\"${NEW_UUID?}\"}"
}
# Create a bhyve VM whose bootable disk0 is a blank zvol of the requested
# size, used to exercise disk-size validation against the (10G) images.
# Usage: create_bhyve_custom_disk <alias> <disk_size_mb>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_bhyve_custom_disk() {
  vm_alias=$1
  disk_mb=$2
  vmadm create <<EOF
{
"alias": "${vm_alias?}",
"autoboot": false,
"brand": "bhyve",
"bootrom": "uefi",
"ram": 1024,
"quota": 10,
"disks": [
{
"boot": true,
"compression": "lz4",
"model": "virtio",
"size": ${disk_mb?}
}
],
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${vm_alias?})
}
# NOTE: All images are 10G, so we test disk size validation by creating
# VMs with smaller boot disks (e.g. 1G) and attempting reprovision with 10G images
# Test reprovision failure due to image size validation: the image is larger
# than disk0, so reprovision must fail and leave the disk untouched.
# Usage: test_disk_size_failure <vm_uuid> <too_large_image_uuid>
# Returns 1 if the reprovision unexpectedly succeeds.
test_disk_size_failure() {
  VM_UUID=$1
  LARGE_IMAGE_UUID=$2
  echo "Testing disk size validation (image too large):"
  echo "Original disk configuration:"
  # Quote 'disks[0]' so the brackets can never undergo pathname expansion
  # (matches the quoting the interactive transcripts use).
  vmadm get ${VM_UUID?} | json 'disks[0]' | json -a size image_uuid
  echo "Attempting reprovision with larger image (should fail):"
  if reprovision ${VM_UUID?} ${LARGE_IMAGE_UUID?}; then
    echo "ERROR: Reprovision should have failed due to disk size but succeeded!"
    return 1
  else
    echo "SUCCESS: Reprovision correctly failed due to disk size validation"
  fi
  echo "Verifying VM disk unchanged:"
  vmadm get ${VM_UUID?} | json 'disks[0]' | json -a size image_uuid
}
# Test that reprovision rejects an invalid image UUID.
# Usage: test_invalid_image_uuid <vm_uuid> <invalid_uuid>
# The invalid UUID may be the empty string (see the call sites), so it MUST
# be quoted when forwarded: unquoted, an empty argument vanishes entirely,
# reprovision() then sees an unset $2, and its ${NEW_UUID?} expansion aborts
# the whole script instead of exercising vmadm's validation.
test_invalid_image_uuid() {
  VM_UUID=$1
  INVALID_UUID=$2
  echo "Testing invalid image UUID: ${INVALID_UUID?}"
  if reprovision ${VM_UUID?} "${INVALID_UUID?}"; then
    echo "ERROR: Reprovision should have failed with invalid UUID but succeeded!"
    return 1
  else
    echo "SUCCESS: Reprovision correctly failed with invalid UUID"
  fi
}
# Create a native zone with indestructible_zoneroot set, for testing that
# reprovision (and delete) are blocked until the flag is cleared.
# Usage: create_native_indestructible <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_native_indestructible() {
  zone_alias=$1
  zone_image=$2
  vmadm create <<EOF
{
"alias": "${zone_alias?}",
"autoboot": false,
"brand": "joyent",
"max_physical_memory": 256,
"quota": 10,
"image_uuid": "${zone_image?}",
"indestructible_zoneroot": true,
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${zone_alias?})
}
# Create a native zone with autoboot enabled, for testing that the VM
# restarts after a reprovision.
# Usage: create_native_autoboot <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_native_autoboot() {
  zone_alias=$1
  zone_image=$2
  vmadm create <<EOF
{
"alias": "${zone_alias?}",
"autoboot": true,
"brand": "joyent",
"max_physical_memory": 256,
"quota": 10,
"image_uuid": "${zone_image?}",
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${zone_alias?})
}
# Create a bhyve VM with indestructible_zoneroot set, for testing that
# reprovision is blocked until the flag is cleared.
# Usage: create_bhyve_indestructible <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_bhyve_indestructible() {
  vm_alias=$1
  vm_image=$2
  vmadm create <<EOF
{
"alias": "${vm_alias?}",
"autoboot": false,
"brand": "bhyve",
"bootrom": "uefi",
"ram": 1024,
"quota": 10,
"indestructible_zoneroot": true,
"disks": [
{
"boot": true,
"compression": "lz4",
"model": "virtio",
"image_uuid": "${vm_image?}"
}
],
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${vm_alias?})
}
# Create a bhyve VM whose disk0 is NOT marked bootable (the boot flag is on
# disk1 instead); reprovision must reject this configuration.
# Usage: create_bhyve_non_bootable <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_bhyve_non_bootable() {
  vm_alias=$1
  vm_image=$2
  vmadm create <<EOF
{
"alias": "${vm_alias?}",
"autoboot": false,
"brand": "bhyve",
"bootrom": "uefi",
"ram": 1024,
"quota": 10,
"disks": [
{
"compression": "lz4",
"model": "virtio",
"image_uuid": "${vm_image?}"
},
{
"boot": true,
"size": 1024,
"compression": "lz4",
"model": "virtio"
}
],
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${vm_alias?})
}
# Create a bhyve VM with a rich configuration (hostname, vcpus, metadata,
# resolvers, a NIC, and a second disk) for configuration-preservation tests.
# Usage: create_bhyve_preserve <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_bhyve_preserve() {
  vm_alias=$1
  vm_image=$2
  vmadm create <<EOF
{
"alias": "${vm_alias?}",
"hostname": "${vm_alias?}",
"brand": "bhyve",
"ram": 2048,
"vcpus": 4,
"disks": [
{
"boot": true,
"compression": "lz4",
"model": "virtio",
"image_uuid": "${vm_image?}"
},
{
"size": 1024,
"compression": "lz4",
"model": "virtio"
}
],
"customer_metadata": {
"root_authorized_keys": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAEE4qXNM3Weo+IludjapmHzQUPYljhJrNa5zCyk3hYM SSH Key"
},
"resolvers": [
"172.17.2.1",
"8.8.8.8",
"8.8.4.4"
],
"nics": [
{
"model": "virtio",
"nic_tag": "external",
"ips": [
"dhcp"
],
"primary": "1"
}
]
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${vm_alias?})
}
# Create an lx-branded zone with a delegated dataset.
# Usage: create_lx <alias> <image_uuid>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_lx() {
  zone_alias=$1
  zone_image=$2
  vmadm create <<EOF
{
"alias": "${zone_alias?}",
"hostname": "${zone_alias?}",
"brand": "lx",
"kernel_version": "5.10.0",
"max_physical_memory": 32768,
"image_uuid": "${zone_image?}",
"zfs_root_compression": "lz4",
"delegate_dataset": true,
"zfs_data_compression": "lz4",
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${zone_alias?})
}
# Create a bhyve VM whose bootable disk0 is a blank 1G zvol (plus a 1G data
# disk), used to confirm a 10G image cannot be reprovisioned onto it.
# Usage: create_bhyve_small_resize <alias>
# Side effect: sets the global UUID to the new VM's uuid for the caller.
create_bhyve_small_resize() {
  vm_alias=$1
  vmadm create <<EOF
{
"alias": "${vm_alias?}",
"autoboot": false,
"brand": "bhyve",
"bootrom": "uefi",
"ram": 1024,
"disks": [
{
"boot": true,
"compression": "lz4",
"model": "virtio",
"size": 1024
},
{
"size": 1024,
"compression": "lz4",
"model": "virtio"
}
],
"nics": []
}
EOF
  UUID=$(vmadm list -Ho uuid alias=${vm_alias?})
}
# Verify that reprovision automatically stops a running VM.
# Usage: test_running_vm_reprovision <vm_uuid> <new_image_uuid>
# Returns 1 if the reprovision fails.
test_running_vm_reprovision() {
  vm=$1
  img=$2
  echo "Starting VM for running state test:"
  vmadm start ${vm?}
  echo "VM state before reprovision:"
  vmadm get ${vm?} | json state
  echo "Attempting reprovision on running VM (should auto-stop):"
  # Guard clause instead of if/else: bail out early on failure.
  if ! reprovision ${vm?} ${img?}; then
    echo "Reprovision failed on running VM"
    return 1
  fi
  echo "SUCCESS: Reprovision succeeded, VM was auto-stopped"
  echo "VM state after reprovision:"
  vmadm get ${vm?} | json state
}
# Verify the VM's effective image UUID after a reprovision.  For bhyve the
# image lives on disks[0].image_uuid; native zones carry a top-level
# image_uuid — the json -e expression below selects whichever applies.
# Usage: verify_vm_functionality <vm_uuid> <expected_image_uuid>
# Returns 1 on mismatch.
verify_vm_functionality() {
  VM_UUID=$1
  EXPECTED_IMAGE_UUID=$2
  echo "Testing VM functionality after reprovision:"
  echo "Verifying image UUID was updated correctly:"
  # Run the lookup once and keep the result; the original ran the identical
  # vmadm|json pipeline a second time just to print it on failure.
  ACTUAL_IMAGE=$(vmadm get ${VM_UUID?} | json -e 'if (this.brand === "bhyve") { this.current_image = this.disks[0].image_uuid; } else { this.current_image = this.image_uuid; }' current_image)
  # Exact string compare instead of an unescaped grep regex.
  if [ "$ACTUAL_IMAGE" = "${EXPECTED_IMAGE_UUID?}" ]; then
    echo "SUCCESS: Image UUID correctly updated after reprovision"
  else
    echo "ERROR: Image UUID was not updated correctly after reprovision!"
    echo "Expected: ${EXPECTED_IMAGE_UUID}"
    echo "Actual image UUID:"
    echo "$ACTUAL_IMAGE"
    return 1
  fi
}
# Test configuration preservation after reprovision
# Usage: verify_config_preservation <vm_uuid> <expected_image_uuid>
# Confirms the image was swapped (via verify_vm_functionality) and that the
# settings created by create_bhyve_preserve survived: hostname, ram, vcpus,
# customer_metadata, resolvers, and the second (data) disk.
# NOTE: the grep checks are substring matches against the JSON dump, so they
# assume the create_bhyve_preserve values (hostname containing "preserve",
# ram 2048, vcpus 4, an ssh-ed25519 key, resolver 8.8.8.8).
verify_config_preservation() {
VM_UUID=$1
EXPECTED_IMAGE_UUID=$2
echo "Verifying configuration preservation after reprovision:"
# Verify image was updated
verify_vm_functionality ${VM_UUID?} ${EXPECTED_IMAGE_UUID?}
# Check that key configuration was preserved
echo "Checking preserved configuration:"
# Fetch the config once; every check below greps this snapshot.
CURRENT_CONFIG=$(vmadm get ${VM_UUID?})
if echo "$CURRENT_CONFIG" | json hostname | grep -q "preserve"; then
echo "SUCCESS: Hostname preserved"
else
echo "ERROR: Hostname not preserved"
return 1
fi
if echo "$CURRENT_CONFIG" | json ram | grep -q "2048"; then
echo "SUCCESS: RAM setting preserved"
else
echo "ERROR: RAM setting not preserved"
return 1
fi
if echo "$CURRENT_CONFIG" | json vcpus | grep -q "4"; then
echo "SUCCESS: vCPU count preserved"
else
echo "ERROR: vCPU count not preserved"
return 1
fi
if echo "$CURRENT_CONFIG" | json customer_metadata.root_authorized_keys | grep -q "ssh-ed25519"; then
echo "SUCCESS: Customer metadata preserved"
else
echo "ERROR: Customer metadata not preserved"
return 1
fi
if echo "$CURRENT_CONFIG" | json resolvers | grep -q "8.8.8.8"; then
echo "SUCCESS: Resolvers preserved"
else
echo "ERROR: Resolvers not preserved"
return 1
fi
# Check that second disk is preserved
DISK_COUNT=$(echo "$CURRENT_CONFIG" | json disks | json length)
if [ "$DISK_COUNT" = "2" ]; then
echo "SUCCESS: Multiple disks preserved"
else
echo "ERROR: Expected 2 disks, found $DISK_COUNT"
return 1
fi
echo "All configuration preservation tests passed!"
}
# Happy path: create a native zone, reprovision it to a second native image,
# confirm the recorded image_uuid changed, then delete it.
echo "--- Native Zone Test ---"
create_native reprovision-native ${NATIVE_UUID1} && \
vmadm list -H uuid=${UUID?} && \
echo "Original image id:" && \
vmadm get ${UUID?} | json image_uuid && \
reprovision ${UUID?} ${NATIVE_UUID2} && \
echo "Reprovisioned image id:" && \
vmadm get ${UUID?} | json image_uuid && \
vmadm delete ${UUID?}
# Happy path for bhyve: same flow, but the image lives on disks[0].
echo "--- Bhyve Test ---"
create_bhyve reprovision-bhyve ${BHYVE_UUID1} && \
vmadm list -H uuid=${UUID?} && \
echo "Original image id:" && \
vmadm get ${UUID?} | json disks[0].image_uuid && \
reprovision ${UUID?} ${BHYVE_UUID2} && \
echo "Reprovisioned image id:" && \
vmadm get ${UUID?} | json disks[0].image_uuid && \
vmadm delete ${UUID?}
# Image-type mismatch: a zone-dataset image must be rejected for a bhyve VM,
# and the VM must remain intact and manageable afterwards.
echo "--- Failure Test: Native image on Bhyve VM (should fail) ---"
create_bhyve reprovision-bhyve-fail ${BHYVE_UUID1} && \
echo "Original image id:" && \
vmadm get ${UUID?} | json disks[0].image_uuid && \
echo "Attempting reprovision with native image (should fail):" && \
if reprovision ${UUID?} ${NATIVE_UUID1}; then
echo "ERROR: Reprovision should have failed but succeeded!"
exit 1
else
echo "SUCCESS: Reprovision correctly failed as expected"
fi && \
echo "Verifying VM state unchanged:" && \
vmadm get ${UUID?} | json disks[0].image_uuid && \
echo "Verifying VM can still be managed:" && \
vmadm list -H uuid=${UUID?} && \
vmadm delete ${UUID?}
# Reverse direction: a zvol image must be rejected for a native zone.
echo "--- Failure Test: Bhyve image on Native VM (should fail) ---"
create_native reprovision-native-fail ${NATIVE_UUID1} && \
echo "Original image id:" && \
vmadm get ${UUID?} | json image_uuid && \
echo "Attempting reprovision with bhyve image (should fail):" && \
if reprovision ${UUID?} ${BHYVE_UUID1}; then
echo "ERROR: Reprovision should have failed but succeeded!"
exit 1
else
echo "SUCCESS: Reprovision correctly failed as expected"
fi && \
echo "Verifying VM state unchanged:" && \
vmadm get ${UUID?} | json image_uuid && \
echo "Verifying VM can still be managed:" && \
vmadm list -H uuid=${UUID?} && \
vmadm delete ${UUID?}
# Payload validation: malformed, empty, and unknown-but-well-formed UUIDs
# must all be rejected.
echo "--- Invalid Image UUID Tests ---"
create_native reprovision-invalid-uuid ${NATIVE_UUID1} && \
test_invalid_image_uuid ${UUID?} "not-a-valid-uuid" && \
test_invalid_image_uuid ${UUID?} "" && \
test_invalid_image_uuid ${UUID?} "12345678-1234-1234-1234-123456789012" && \
vmadm delete ${UUID?}
# Constraint: indestructible_zoneroot must block reprovision; clear the flag
# afterwards so the VM can be deleted.
echo "--- Indestructible Zoneroot Test (should fail) ---"
create_native_indestructible reprovision-indestructible ${NATIVE_UUID1} && \
echo "Original image id:" && \
vmadm get ${UUID?} | json image_uuid && \
echo "Attempting reprovision on indestructible VM (should fail):" && \
if reprovision ${UUID?} ${NATIVE_UUID2}; then
echo "ERROR: Reprovision should have failed on indestructible VM!"
exit 1
else
echo "SUCCESS: Reprovision correctly failed on indestructible VM"
fi && \
echo "Unsetting indestructible_zoneroot to allow cleanup:" && \
echo '{"indestructible_zoneroot": false}' | vmadm update ${UUID?} && \
vmadm delete ${UUID?}
# State management: a running VM should be auto-stopped for reprovision.
echo "--- Running VM Reprovision Test ---"
create_native reprovision-running ${NATIVE_UUID1} && \
test_running_vm_reprovision ${UUID?} ${NATIVE_UUID2} && \
vmadm delete ${UUID?}
# Autoboot: the setting must survive reprovision, and the VM should come
# back up afterwards.
echo "--- Autoboot VM Test ---"
create_native_autoboot reprovision-autoboot ${NATIVE_UUID1} && \
echo "Original state and autoboot setting:" && \
vmadm get ${UUID?} | json state autoboot && \
reprovision ${UUID?} ${NATIVE_UUID2} && \
echo "After reprovision - state and autoboot setting:" && \
vmadm get ${UUID?} | json state autoboot && \
echo "Testing if autoboot is preserved..." && \
verify_vm_functionality ${UUID?} ${NATIVE_UUID2} && \
vmadm delete ${UUID?}
# Disk-size validation: a 10G image must not fit on a 1G boot disk.
echo "--- Disk Size Validation Tests ---"
echo "Test 1: Small disk (1GB) with 10GB image (should fail)"
create_bhyve_custom_disk reprovision-small-disk 1024 && \
test_disk_size_failure ${UUID?} ${BHYVE_UUID2} && \
vmadm delete ${UUID?}
echo "Test 2: Bhyve indestructible VM reprovision (should fail)"
create_bhyve_indestructible reprovision-bhyve-indestructible ${BHYVE_UUID1} && \
echo "Original image id:" && \
vmadm get ${UUID?} | json disks[0].image_uuid && \
echo "Attempting reprovision on indestructible bhyve VM (should fail):" && \
if reprovision ${UUID?} ${BHYVE_UUID2}; then
echo "ERROR: Reprovision should have failed on indestructible bhyve VM!"
exit 1
else
echo "SUCCESS: Reprovision correctly failed on indestructible bhyve VM"
fi && \
echo "Unsetting indestructible_zoneroot to allow cleanup:" && \
echo '{"indestructible_zoneroot": false}' | vmadm update ${UUID?} && \
vmadm delete ${UUID?}
# End-to-end bhyve reprovision with image-uuid verification.
echo "--- VM Functionality Verification ---"
create_bhyve reprovision-function-test ${BHYVE_UUID1} && \
reprovision ${UUID?} ${BHYVE_UUID2} && \
verify_vm_functionality ${UUID?} ${BHYVE_UUID2} && \
vmadm delete ${UUID?}
# Bootability check: reprovision requires a bootable disk0 and must reject
# a VM whose boot flag sits on disk1 instead.
echo "--- Non-Bootable Disk0 Test (should fail) ---"
create_bhyve_non_bootable reprovision-non-bootable ${BHYVE_UUID1} && \
echo "VM disk configuration (disk0 is not bootable):" && \
vmadm get ${UUID?} | json disks | json -a boot image_uuid && \
echo "Attempting reprovision with non-bootable disk0 (should fail):" && \
if reprovision ${UUID?} ${BHYVE_UUID2}; then
echo "ERROR: Reprovision should have failed with non-bootable disk0!"
exit 1
else
echo "SUCCESS: Reprovision correctly failed with non-bootable disk0"
fi && \
vmadm delete ${UUID?}
# Size check again from the other direction: a blank 1G boot disk cannot
# accept a 10G image.
echo "--- Small Disk Resize Test ---"
create_bhyve_small_resize reprovision-resize && \
echo "VM with small boot disk" && \
vmadm get ${UUID?} | json disks[0].size && \
echo "Attempting reprovision with 10G image:" && \
if reprovision ${UUID?} ${BHYVE_UUID2}; then
echo "ERROR: Reprovision should have failed with image bigger than disk"
exit 1
else
echo "SUCCESS: Reprovision failed as expected"
fi && \
vmadm delete ${UUID?}
# Test reprovision with snapshots (no holds) on disk0 - should succeed, and
# the snapshots should be gone along with the replaced boot disk.
# Usage: test_reprovision_with_snapshots <vm_uuid> <new_image_uuid>
test_reprovision_with_snapshots() {
  VM_UUID=$1
  NEW_IMAGE_UUID=$2
  echo "Testing reprovision with snapshots (no holds):"
  # Derive the disk0 dataset from its device path.  Strip either the
  # /dev/zvol/rdsk/ (character) or /dev/zvol/dsk/ (block) prefix explicitly:
  # the original pattern '[rd]dsk' matched "rdsk"/"ddsk" but never plain
  # "dsk", so a block-device path would have slipped through unstripped.
  DISK0_DS=$(vmadm get ${VM_UUID?} | json 'disks[0].path' | sed -e 's|^/dev/zvol/rdsk/||' -e 's|^/dev/zvol/dsk/||')
  echo "Disk0 dataset: $DISK0_DS"
  # Create a test snapshot on disk0
  echo "Creating test snapshot on disk0..."
  zfs snapshot ${DISK0_DS}@test-snapshot
  echo "Listing snapshots before reprovision:"
  zfs list -t snapshot ${DISK0_DS} 2>/dev/null || echo "No snapshots listed"
  echo "Attempting reprovision (should succeed and destroy snapshots):"
  if reprovision ${VM_UUID?} ${NEW_IMAGE_UUID?}; then
    echo "SUCCESS: Reprovision succeeded with snapshots present"
    echo "Verifying snapshots were destroyed:"
    if zfs list -t snapshot ${DISK0_DS} 2>/dev/null | grep -q "${DISK0_DS}@"; then
      echo "WARNING: Some snapshots may still exist"
      zfs list -t snapshot ${DISK0_DS}
    else
      echo "SUCCESS: All snapshots were properly destroyed"
    fi
    # Verify the image was updated
    ACTUAL_IMAGE=$(vmadm get ${VM_UUID?} | json 'disks[0].image_uuid')
    if [ "$ACTUAL_IMAGE" = "$NEW_IMAGE_UUID" ]; then
      echo "SUCCESS: Image UUID updated correctly to $NEW_IMAGE_UUID"
    else
      echo "ERROR: Image UUID not updated. Expected: $NEW_IMAGE_UUID, Actual: $ACTUAL_IMAGE"
      return 1
    fi
  else
    echo "ERROR: Reprovision failed with snapshots present (should have succeeded)"
    return 1
  fi
}
# Test reprovision with snapshots that have holds - should fail while the
# holds exist, then succeed once the holds are released.
# Usage: test_reprovision_with_holds <vm_uuid> <new_image_uuid> <original_image_uuid>
# (The third argument was previously undocumented and read mid-function; it
# is used to verify the VM image is unchanged after the expected failure.)
test_reprovision_with_holds() {
  VM_UUID=$1
  NEW_IMAGE_UUID=$2
  ORIGINAL_IMAGE_UUID=$3
  echo "Testing reprovision with held snapshots (should fail):"
  # Strip either zvol device prefix; the original '[rd]dsk' pattern matched
  # "rdsk"/"ddsk" but never plain "dsk".
  DISK0_DS=$(vmadm get ${VM_UUID?} | json 'disks[0].path' | sed -e 's|^/dev/zvol/rdsk/||' -e 's|^/dev/zvol/dsk/||')
  echo "Disk0 dataset: $DISK0_DS"
  # Create test snapshots on disk0
  echo "Creating test snapshots with holds..."
  zfs snapshot ${DISK0_DS}@test-snapshot-1
  zfs snapshot ${DISK0_DS}@test-snapshot-2
  # Add holds to the snapshots
  zfs hold test-hold-1 ${DISK0_DS}@test-snapshot-1
  zfs hold test-hold-2 ${DISK0_DS}@test-snapshot-2
  echo "Listing snapshots and holds:"
  zfs holds ${DISK0_DS}@test-snapshot-1 ${DISK0_DS}@test-snapshot-2
  echo "Attempting reprovision (should fail due to holds):"
  if reprovision ${VM_UUID?} ${NEW_IMAGE_UUID?}; then
    echo "ERROR: Reprovision should have failed due to holds on snapshots!"
    # Clean up holds before returning error
    zfs release test-hold-1 ${DISK0_DS}@test-snapshot-1 2>/dev/null || true
    zfs release test-hold-2 ${DISK0_DS}@test-snapshot-2 2>/dev/null || true
    return 1
  else
    echo "SUCCESS: Reprovision correctly failed due to holds on snapshots"
    # Verify VM image was not changed
    ACTUAL_IMAGE=$(vmadm get ${VM_UUID?} | json 'disks[0].image_uuid')
    if [ "$ACTUAL_IMAGE" = "$ORIGINAL_IMAGE_UUID" ]; then
      echo "SUCCESS: VM image unchanged after failed reprovision"
    else
      echo "WARNING: VM image may have changed despite failure"
    fi
  fi
  # Clean up: release holds and destroy snapshots
  echo "Cleaning up holds and snapshots..."
  zfs release test-hold-1 ${DISK0_DS}@test-snapshot-1 2>/dev/null || true
  zfs release test-hold-2 ${DISK0_DS}@test-snapshot-2 2>/dev/null || true
  zfs destroy ${DISK0_DS}@test-snapshot-1 2>/dev/null || true
  zfs destroy ${DISK0_DS}@test-snapshot-2 2>/dev/null || true
  # Now test that reprovision works after holds are removed
  echo "Testing reprovision after holds removed (should now succeed):"
  if reprovision ${VM_UUID?} ${NEW_IMAGE_UUID?}; then
    echo "SUCCESS: Reprovision succeeded after holds were released"
    # Verify image was updated
    ACTUAL_IMAGE=$(vmadm get ${VM_UUID?} | json 'disks[0].image_uuid')
    if [ "$ACTUAL_IMAGE" = "$NEW_IMAGE_UUID" ]; then
      echo "SUCCESS: Image UUID updated correctly to $NEW_IMAGE_UUID"
    else
      echo "ERROR: Image UUID not updated. Expected: $NEW_IMAGE_UUID, Actual: $ACTUAL_IMAGE"
      return 1
    fi
  else
    echo "ERROR: Reprovision failed even after holds were released"
    return 1
  fi
}
echo "--- Snapshot Tests (No Holds) ---"
# Create a throwaway bhyve VM from BHYVE_UUID1, reprovision it to BHYVE_UUID2
# with loose snapshots present, then delete it.  Each step only runs if the
# previous one succeeded.
create_bhyve reprovision-snapshot-test "${BHYVE_UUID1}" && \
test_reprovision_with_snapshots "${UUID?}" "${BHYVE_UUID2}" && \
vmadm delete "${UUID?}"
echo "--- Snapshot Hold Tests ---"
# Same flow, but with held snapshots; the third argument is the original
# image so the test can verify the failed attempt changed nothing.
create_bhyve reprovision-hold-test "${BHYVE_UUID1}" && \
test_reprovision_with_holds "${UUID?}" "${BHYVE_UUID2}" "${BHYVE_UUID1}" && \
vmadm delete "${UUID?}"
Nahum Shalman commented on 2025-08-29T10:53:19.936-0400 (edited 2025-09-03T13:41:09.886-0400):
And for posterity, the flowcharts Claude created for me (rendered here):
# VM Reprovision Decision Tree Flowcharts
This document contains three flowcharts that together document the decision tree in the `vmadm reprovision` code path from `/workspace/smartos-live/src/vm/node_modules/VM.js:11226`.
## Chart 1: Pre-Brand Check Steps (Initial Validation)
This chart shows all validation and preparation steps that occur before the code branches into native zone or bhyve-specific paths.
```mermaid
flowchart TD
Start([vmadm reprovision called]) --> LoadVM[Load VM & Check:<br/>• Brand supports reprovision<br/>• Has required features<br/>• Has image_uuid payload<br/>• No indestructible_zoneroot<br/>• No multiple/non-standard datasets]
LoadVM -->|Any check fails| End([End with Error])
LoadVM -->|All checks pass| IsBhyve{Is brand<br/>bhyve?}
IsBhyve -->|Yes| ValidateBhyve[Validate bhyve requirements:<br/>• Validate zvol image<br/>• Check image size ≤ disk0 size<br/>• Check for holds on disk0 snapshots]
IsBhyve -->|No| ValidateZone[Validate zone-dataset image]
ValidateBhyve -->|Validation fails| End
ValidateZone -->|Validation fails| End
ValidateBhyve -->|Validation passes| CheckVMState
ValidateZone -->|Validation passes| CheckVMState{VM state is<br/>installed?}
CheckVMState -->|Yes| SetTransition[Set transition to provisioning]
CheckVMState -->|No| StopVM[Stop VM & wait for<br/>installed state<br/>with timeout/force halt]
StopVM -->|Stop fails| End
StopVM -->|Stop succeeds| SetTransition
SetTransition -->|Transition fails| End
SetTransition -->|Transition succeeds| BranchPoint{Is brand<br/>bhyve?}
BranchPoint -->|Yes| BhyveProcess([Continue to<br/>Bhyve Process])
BranchPoint -->|No| NativeProcess([Continue to<br/>Native Process])
style Start fill:#4CAF50,stroke:#2E7D32,stroke-width:2px,color:#000
style BhyveProcess fill:#2196F3,stroke:#1565C0,stroke-width:2px,color:#fff
style NativeProcess fill:#2196F3,stroke:#1565C0,stroke-width:2px,color:#fff
style End fill:#f44336,stroke:#c62828,stroke-width:2px,color:#fff
```
## Chart 2: Native Zone Reprovision Process
This chart shows the complete native zone reprovision workflow after initial validation.
```mermaid
flowchart TD
Start([From Pre-Brand Check]) --> PrepareDatasets[Prepare datasets:<br/>• Unset zoned flag if needed<br/>• Unmount cores<br/>• Rename datasets for backup]
PrepareDatasets -->|Preparation fails| End([End with Error])
PrepareDatasets -->|Preparation succeeds| CloneImage[Find/create snapshot & clone<br/>to new zoneroot<br/>with retry on busy]
CloneImage -->|Clone fails| End
CloneImage -->|Clone succeeds| CopyConfig[Create config directory &<br/>copy config from old root]
CopyConfig --> DestroyOld[Destroy old zoneroot &<br/>remount cores]
DestroyOld --> RunInstall[Run brand install script<br/>with -r flag]
RunInstall --> RestoreDatasets[Restore delegated datasets:<br/>• Rename back from backup<br/>• Set zoned flag<br/>• Attach to zone]
RestoreDatasets --> Finalize[Run installZone &<br/>cleanup vminfod stream]
Finalize -->|Any errors| End
Finalize -->|Success| Success([Reprovision Complete])
style Start fill:#2196F3,stroke:#1565C0,stroke-width:2px,color:#fff
style Success fill:#4CAF50,stroke:#2E7D32,stroke-width:2px,color:#000
style End fill:#f44336,stroke:#c62828,stroke-width:2px,color:#fff
```
## Chart 3: Bhyve Reprovision Process
This chart shows the bhyve-specific reprovision workflow after initial validation.
```mermaid
flowchart TD
Start([From Pre-Brand Check]) --> ValidateDisk[Find & validate disk0:<br/>• Must exist in VM disk list<br/>• Must be marked bootable]
ValidateDisk -->|Validation fails| End([End with Error])
ValidateDisk -->|Validation passes| FindSnapshot[Find image snapshot<br/>@final or @uuid]
FindSnapshot -->|Snapshot not found| End
FindSnapshot -->|Snapshot found| ReplaceDisk[Replace disk0:<br/>• Destroy old disk0 with -r flag<br/>• Clone snapshot to new disk0<br/>• Update zone image-uuid]
ReplaceDisk --> RunInstall[Run brand install script<br/>with -r flag]
RunInstall --> Cleanup[Unset provisioning transition<br/>& optionally start VM]
Cleanup --> Success([Reprovision Complete])
style Start fill:#2196F3,stroke:#1565C0,stroke-width:2px,color:#fff
style Success fill:#4CAF50,stroke:#2E7D32,stroke-width:2px,color:#000
style End fill:#f44336,stroke:#c62828,stroke-width:2px,color:#fff
```
## Key Decision Points
### 1. **Brand Support Check**
- Verifies if the VM's brand supports reprovision
- Requires both `reprovision` and `brand_install_script` features
### 2. **Image UUID Validation**
- Ensures `image_uuid` is provided in the payload
- For bhyve: validates zvol image type for disk0 replacement
- For native zones: validates zone-dataset image type
### 3. **Indestructible Check**
- Cannot reprovision if `indestructible_zoneroot` is set
- Must be disabled before reprovision
### 4. **Dataset Validation**
- Cannot have multiple delegated datasets
- Only supports standard `/data` delegated dataset
- Non-standard datasets block reprovision
### 5. **VM State Management**
- VM must be in "installed" state
- Automatically stops running VMs
- Implements timeout and force halt fallback
### 6. **Disk Size Validation (Bhyve Only)**
- **Two-step validation process** using `vasync.waterfall`:
1. **validateImage**: Validates zvol image type and availability
2. **validateBhyveDiskSizes**: Helper function that compares disk sizes
- Validates new image size against current disk0 size **before state transition**
- Prevents reprovision if new image is larger than current disk
- Ensures VM stays in original state if validation fails
- **Refactored into separate function** to reduce code complexity and nesting
### 7. **ZFS Hold Validation (Bhyve Only)**
- **Pre-transition detection**: Checks for ZFS holds on disk0 snapshots before any state transitions
- **Comprehensive checking**: Lists all snapshots on disk0 dataset and checks each for holds
- **Detailed error reporting**: Shows exactly which snapshots have which holds
- **Safe abort**: VM remains unchanged if holds would prevent snapshot destruction
- **Prevents provisioning state**: Aborts before VM enters "provisioning" state
### 8. **Brand-Specific Paths**
- **Bhyve**: Replaces disk0 only with hold checking
- **Native Zones**: Replaces entire zoneroot
### 9. **Snapshot Management**
- Checks for existing snapshots (@final or @uuid)
- Creates new snapshot if needed
- Clones snapshot to create new filesystem
### 10. **Configuration Preservation**
- Copies existing config directory
- Preserves metadata, routes, tags
- Maintains delegated datasets
### 11. **Cleanup Operations**
- Removes temporary datasets
- Destroys old root datasets
- Restores delegated datasets
### 12. **Post-Reprovision Actions**
- Runs brand install script with `-r` flag
- Updates zone metadata
- Optionally starts VM if autoboot is set
## Error Handling
The flowchart shows all error paths leading to termination with specific error messages:
- Brand compatibility errors
- Missing required features
- Invalid images **including disk size incompatibility for bhyve VMs**
- Dataset configuration issues
- **ZFS hold detection for bhyve VMs** - prevents reprovision when snapshots have holds
- State transition failures
- Filesystem operation failures
## Notes
- The bhyve brand follows a simplified path focusing on disk0 replacement
- Native zones undergo full zoneroot replacement with config preservation
- All operations are atomic with rollback capabilities
- Transitions are used to track provisioning state
- The process integrates with vminfod for state management
Nahum Shalman commented on 2025-09-03T13:45:34.072-0400:
Test Results:
[root@smartos ~]# /opt/smartos-builds/smartos-live/reprovision-test.sh
--- Native Zone Test ---
Successfully created VM 36c54af5-0d65-44ca-8d74-142f697dc30d
36c54af5-0d65-44ca-8d74-142f697dc30d OS 256 stopped reprovision-native
Original image id:
e44ed3e0-910b-11ed-a5d4-00151714048c
Successfully reprovisioned VM 36c54af5-0d65-44ca-8d74-142f697dc30d
Reprovisioned image id:
2f1dc911-6401-4fa4-8e9d-67ea2e39c271
Successfully deleted VM 36c54af5-0d65-44ca-8d74-142f697dc30d
--- Bhyve Test ---
Successfully created VM a84a129d-c99b-470f-93bd-bd24c86eb644
a84a129d-c99b-470f-93bd-bd24c86eb644 BHYV 1024 stopped reprovision-bhyve
Original image id:
1cbf6e26-dcfb-4a10-b982-0dedde078bf7
Successfully reprovisioned VM a84a129d-c99b-470f-93bd-bd24c86eb644
Reprovisioned image id:
de6c5581-1779-43b4-8925-aca22dd0980b
Successfully deleted VM a84a129d-c99b-470f-93bd-bd24c86eb644
--- Failure Test: Native image on Bhyve VM (should fail) ---
Successfully created VM 0d8fb26b-3722-4d80-892a-d11554493b5f
Original image id:
1cbf6e26-dcfb-4a10-b982-0dedde078bf7
Attempting reprovision with native image (should fail):
Failed to reprovision VM 0d8fb26b-3722-4d80-892a-d11554493b5f: image e44ed3e0-910b-11ed-a5d4-00151714048c is type zone-dataset, but must be one of: ["zvol"]
SUCCESS: Reprovision correctly failed as expected
Verifying VM state unchanged:
1cbf6e26-dcfb-4a10-b982-0dedde078bf7
Verifying VM can still be managed:
0d8fb26b-3722-4d80-892a-d11554493b5f BHYV 1024 stopped reprovision-bhyve-fail
Successfully deleted VM 0d8fb26b-3722-4d80-892a-d11554493b5f
--- Failure Test: Bhyve image on Native VM (should fail) ---
Successfully created VM 440cdf63-1a26-4145-b708-2dc6e61a950f
Original image id:
e44ed3e0-910b-11ed-a5d4-00151714048c
Attempting reprovision with bhyve image (should fail):
Failed to reprovision VM 440cdf63-1a26-4145-b708-2dc6e61a950f: image 1cbf6e26-dcfb-4a10-b982-0dedde078bf7 is type zvol, but must be one of: ["zone-dataset"]
SUCCESS: Reprovision correctly failed as expected
Verifying VM state unchanged:
e44ed3e0-910b-11ed-a5d4-00151714048c
Verifying VM can still be managed:
440cdf63-1a26-4145-b708-2dc6e61a950f OS 256 stopped reprovision-native-fail
Successfully deleted VM 440cdf63-1a26-4145-b708-2dc6e61a950f
--- Invalid Image UUID Tests ---
Successfully created VM 1aa445ab-7a5e-483f-bdff-633b901459b5
Testing invalid image UUID: not-a-valid-uuid
Failed to reprovision VM 1aa445ab-7a5e-483f-bdff-633b901459b5: Command failed: imgadm get: error (InvalidUUID): invalid uuid: "not-a-valid-uuid"
SUCCESS: Reprovision correctly failed with invalid UUID
Testing invalid image UUID:
Failed to reprovision VM 1aa445ab-7a5e-483f-bdff-633b901459b5: Command failed: imgadm get: error (InvalidUUID): invalid uuid: ""
SUCCESS: Reprovision correctly failed with invalid UUID
Testing invalid image UUID: 12345678-1234-1234-1234-123456789012
Failed to reprovision VM 1aa445ab-7a5e-483f-bdff-633b901459b5: Command failed: imgadm get: error (ImageNotInstalled): image "12345678-1234-1234-1234-123456789012" was not found on zpool "zones"
SUCCESS: Reprovision correctly failed with invalid UUID
Successfully deleted VM 1aa445ab-7a5e-483f-bdff-633b901459b5
--- Indestructible Zoneroot Test (should fail) ---
Successfully created VM a7a78359-3e5e-47d6-8588-715bdf06d39d
Original image id:
e44ed3e0-910b-11ed-a5d4-00151714048c
Attempting reprovision on indestructible VM (should fail):
Failed to reprovision VM a7a78359-3e5e-47d6-8588-715bdf06d39d: indestructible_zoneroot is set, cannot reprovision
SUCCESS: Reprovision correctly failed on indestructible VM
Unsetting indestructible_zoneroot to allow cleanup:
Successfully updated VM a7a78359-3e5e-47d6-8588-715bdf06d39d
Successfully deleted VM a7a78359-3e5e-47d6-8588-715bdf06d39d
--- Running VM Reprovision Test ---
Successfully created VM 972ca00e-5146-4990-a602-52528052f800
Starting VM for running state test:
Successfully started VM 972ca00e-5146-4990-a602-52528052f800
VM state before reprovision:
running
Attempting reprovision on running VM (should auto-stop):
Successfully reprovisioned VM 972ca00e-5146-4990-a602-52528052f800
SUCCESS: Reprovision succeeded, VM was auto-stopped
VM state after reprovision:
running
Successfully deleted VM 972ca00e-5146-4990-a602-52528052f800
--- Autoboot VM Test ---
Successfully created VM bae2558d-3d6f-4f81-8002-51ae450d1000
Original state and autoboot setting:
running
true
Successfully reprovisioned VM bae2558d-3d6f-4f81-8002-51ae450d1000
After reprovision - state and autoboot setting:
running
true
Testing if autoboot is preserved...
Testing VM functionality after reprovision:
Verifying image UUID was updated correctly:
SUCCESS: Image UUID correctly updated after reprovision
Successfully deleted VM bae2558d-3d6f-4f81-8002-51ae450d1000
--- Disk Size Validation Tests ---
Test 1: Small disk (1GB) with 10GB image (should fail)
Successfully created VM 8a73d6a7-5db6-4c77-970e-c755b177aa32
Testing disk size validation (image too large):
Original disk configuration:
1024
Attempting reprovision with larger image (should fail):
Failed to reprovision VM 8a73d6a7-5db6-4c77-970e-c755b177aa32: Cannot reprovision: image (10240 MiB) > disk0 (1024 MiB)
SUCCESS: Reprovision correctly failed due to disk size validation
Verifying VM disk unchanged:
1024
Successfully deleted VM 8a73d6a7-5db6-4c77-970e-c755b177aa32
Test 2: Bhyve indestructible VM reprovision (should fail)
Successfully created VM c16886a0-caeb-48f7-82b4-6aae1916ec44
Original image id:
1cbf6e26-dcfb-4a10-b982-0dedde078bf7
Attempting reprovision on indestructible bhyve VM (should fail):
Failed to reprovision VM c16886a0-caeb-48f7-82b4-6aae1916ec44: indestructible_zoneroot is set, cannot reprovision
SUCCESS: Reprovision correctly failed on indestructible bhyve VM
Unsetting indestructible_zoneroot to allow cleanup:
Successfully updated VM c16886a0-caeb-48f7-82b4-6aae1916ec44
Successfully deleted VM c16886a0-caeb-48f7-82b4-6aae1916ec44
--- VM Functionality Verification ---
Successfully created VM 4b17d536-3728-4c2f-8852-73ccfc9ae0fa
Successfully reprovisioned VM 4b17d536-3728-4c2f-8852-73ccfc9ae0fa
Testing VM functionality after reprovision:
Verifying image UUID was updated correctly:
SUCCESS: Image UUID correctly updated after reprovision
Successfully deleted VM 4b17d536-3728-4c2f-8852-73ccfc9ae0fa
--- Non-Bootable Disk0 Test (should fail) ---
Successfully created VM 5f67f0f7-5aa5-47bc-882a-903073c569c3
VM disk configuration (disk0 is not bootable):
false 1cbf6e26-dcfb-4a10-b982-0dedde078bf7
true
Attempting reprovision with non-bootable disk0 (should fail):
Failed to reprovision VM 5f67f0f7-5aa5-47bc-882a-903073c569c3: disk0 found but does not have boot property set to true - reprovision requires a bootable disk0
SUCCESS: Reprovision correctly failed with non-bootable disk0
Successfully deleted VM 5f67f0f7-5aa5-47bc-882a-903073c569c3
--- Small Disk Resize Test ---
Successfully created VM f03d6bed-a069-468b-9f38-4b491ac19a5e
VM with small boot disk
1024
Attempting reprovision with 10G image:
Failed to reprovision VM f03d6bed-a069-468b-9f38-4b491ac19a5e: Cannot reprovision: image (10240 MiB) > disk0 (1024 MiB)
SUCCESS: Reprovision failed as expected
Successfully deleted VM f03d6bed-a069-468b-9f38-4b491ac19a5e
--- Snapshot Tests (No Holds) ---
Successfully created VM 02ff6633-693f-466e-912a-fe28a7c388e9
Testing reprovision with snapshots (no holds):
Disk0 dataset: zones/02ff6633-693f-466e-912a-fe28a7c388e9/disk0
Creating test snapshot on disk0...
Listing snapshots before reprovision:
No snapshots listed
Attempting reprovision (should succeed and destroy snapshots):
Successfully reprovisioned VM 02ff6633-693f-466e-912a-fe28a7c388e9
SUCCESS: Reprovision succeeded with snapshots present
Verifying snapshots were destroyed:
SUCCESS: All snapshots were properly destroyed
SUCCESS: Image UUID updated correctly to de6c5581-1779-43b4-8925-aca22dd0980b
Successfully deleted VM 02ff6633-693f-466e-912a-fe28a7c388e9
--- Snapshot Hold Tests ---
Successfully created VM 694f72ff-adbe-4642-bf39-d07593cb9e26
Testing reprovision with held snapshots (should fail):
Disk0 dataset: zones/694f72ff-adbe-4642-bf39-d07593cb9e26/disk0
Creating test snapshots with holds...
Listing snapshots and holds:
NAME TAG TIMESTAMP
zones/694f72ff-adbe-4642-bf39-d07593cb9e26/disk0@test-snapshot-1 test-hold-1 Wed Sep 3 17:32 2025
zones/694f72ff-adbe-4642-bf39-d07593cb9e26/disk0@test-snapshot-2 test-hold-2 Wed Sep 3 17:32 2025
Attempting reprovision (should fail due to holds):
Failed to reprovision VM 694f72ff-adbe-4642-bf39-d07593cb9e26: Cannot reprovision: disk0 has snapshots with holds that must be released first:
- zones/694f72ff-adbe-4642-bf39-d07593cb9e26/disk0@test-snapshot-1 (holds: test-hold-1)
- zones/694f72ff-adbe-4642-bf39-d07593cb9e26/disk0@test-snapshot-2 (holds: test-hold-2)
SUCCESS: Reprovision correctly failed due to holds on snapshots
SUCCESS: VM image unchanged after failed reprovision
Cleaning up holds and snapshots...
Testing reprovision after holds removed (should now succeed):
Successfully reprovisioned VM 694f72ff-adbe-4642-bf39-d07593cb9e26
SUCCESS: Reprovision succeeded after holds were released
SUCCESS: Image UUID updated correctly to de6c5581-1779-43b4-8925-aca22dd0980b
Successfully deleted VM 694f72ff-adbe-4642-bf39-d07593cb9e26
Dan McDonald commented on 2025-09-03T15:57:27.053-0400:
Testing on a Kebecloud CN suggests that you can, by hand, reprovision a BHYVE VM on a CN and upstack Triton doesn’t seem to mind all that much.
[root@shemp (kebecloud) ~]# vmadm reprovision b66a6b8a-7e4f-4d98-9787-d39b0cfa5995 < /tmp/reprovision
2025-09-03T19:41:14.201793+00:00 shemp ip: [ID 722105 kern.warning] WARNING: ip_interface_cleanup: cannot open /devices/pseudo/udp6@0:udp6: error 13
2025-09-03T19:41:14.201816+00:00 shemp ip: [ID 722105 kern.warning] WARNING: ip_interface_cleanup: cannot open /devices/pseudo/udp@0:udp: error 13
Successfully reprovisioned VM b66a6b8a-7e4f-4d98-9787-d39b0cfa5995
[root@shemp (kebecloud) ~]# cat /tmp/reprovision
{
"zonename": "b66a6b8a-7e4f-4d98-9787-d39b0cfa5995",
"image_uuid": "d89041d8-41b9-481d-918a-722be4598dd1"
}
[root@shemp (kebecloud) ~]#
and...
kebe(~)[0]% ssh root@172.24.4.185
The authenticity of host '172.24.4.185 (172.24.4.185)' can't be established.
ED25519 key fingerprint is SHA256:SUN462h//l3fx5R+J0euRkLsZWZvr1ze9+zMDRRgpRI.
This key is not known by any other names.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added '172.24.4.185' (ED25519) to the list of known hosts.
Linux b66a6b8a-7e4f-4d98-9787-d39b0cfa5995 6.1.0-13-amd64 #1 SMP PREEMPT_DYNAMIC Debian 6.1.55-1 (2023-09-29) x86_64
*--+--*--*
/| /| /| /|
/ |/ |/ |/ | ##### #### # ##### ### # # TM
+--*--+--*--* # # # # # # # ## #
| /| /| /| /| # #### # # # # # # #
|/ |/ |/ |/ | # # # # # # # # ##
*--+--+--+--+ # # # # # ### # #
| /| /| /| /
|/ |/ |/ |/
*--*--+--*
HVM Instance (Debian 12 20231127T032311)
https://docs.tritondatacenter.com/public-cloud/instances/virtual-machines/images/linux/debian
root@b66a6b8a-7e4f-4d98-9787-d39b0cfa5995:~# ls ~danmcd
ls: cannot access '~danmcd': No such file or directory
root@b66a6b8a-7e4f-4d98-9787-d39b0cfa5995:~# ls /data/danmcd
file-locking-backup kebe.txt
root@b66a6b8a-7e4f-4d98-9787-d39b0cfa5995:~# cat /data/danmcd/kebe.txt
KEBE WAS HERE, AND THIS ONE SURVIVES!
root@b66a6b8a-7e4f-4d98-9787-d39b0cfa5995:~#
and…
[root@moe (kebecloud) ~]# sdc-vmapi /vms/b66a6b8a-7e4f-4d98-9787-d39b0cfa5995 | egrep '202[0-5]-'
"create_timestamp": "2024-05-20T19:33:44.035Z",
"last_modified": "2025-09-03T19:41:19.000Z",
"boot_timestamp": "2025-09-03T19:41:18.000Z",
[root@moe (kebecloud) ~]#