From 67ccd0aba6a19023e8c4d7b0bd35bd02a96ca0b3 Mon Sep 17 00:00:00 2001 From: Yibo Zhuang Date: Sun, 19 Apr 2026 13:35:12 -0700 Subject: [PATCH] Add support for network block device (NBD) This change adds support for attaching a network block device (NBD) to both LinuxContainer and LinuxPod. For LinuxContainer, whether to use the underlying VZNetworkBlockDeviceStorageDeviceAttachment is determined by the URL of the container Mount source. For LinuxPod, this adds additional API to support pod-level volumes that can be mounted into multiple containers. The PodVolume type provides an enum to support multiple types of volume sources. ContainerConfiguration for LinuxPod adds an additional volumeMounts type, which holds container mounts whose source comes from a pod-level volume. This allows the NBD to be attached to the pod at the VM level and then bind-mounted into a container. For integration tests, added a lightweight NBD server implementation in Swift that speaks the NBD protocol to ensure there is sufficient coverage for the changes introduced. 
--- Package.swift | 2 + .../Containerization/AttachedFilesystem.swift | 8 +- Sources/Containerization/LinuxPod.swift | 137 +++++ Sources/Containerization/Mount.swift | 83 ++- Sources/Integration/NBDServer.swift | 381 +++++++++++++ Sources/Integration/NBDTests.swift | 536 ++++++++++++++++++ Sources/Integration/PodTests.swift | 57 ++ Sources/Integration/Suite.swift | 9 + Tests/ContainerizationTests/MountTests.swift | 191 +++++++ 9 files changed, 1399 insertions(+), 5 deletions(-) create mode 100644 Sources/Integration/NBDServer.swift create mode 100644 Sources/Integration/NBDTests.swift diff --git a/Package.swift b/Package.swift index 7e946048..873cf465 100644 --- a/Package.swift +++ b/Package.swift @@ -269,6 +269,8 @@ package.targets.append( dependencies: [ .product(name: "Logging", package: "swift-log"), .product(name: "ArgumentParser", package: "swift-argument-parser"), + .product(name: "NIOCore", package: "swift-nio"), + .product(name: "NIOPosix", package: "swift-nio"), "Containerization", ], path: "Sources/Integration" diff --git a/Sources/Containerization/AttachedFilesystem.swift b/Sources/Containerization/AttachedFilesystem.swift index 6c867815..070fca4c 100644 --- a/Sources/Containerization/AttachedFilesystem.swift +++ b/Sources/Containerization/AttachedFilesystem.swift @@ -29,14 +29,14 @@ public struct AttachedFilesystem: Sendable { public var options: [String] public init(mount: Mount, allocator: any AddressAllocator) throws { - switch mount.type { - case "virtiofs": + switch mount.runtimeOptions { + case .virtiofs: let name = try hashMountSource(source: mount.source) self.source = name - case "ext4": + case .virtioblk: let char = try allocator.allocate() self.source = "/dev/vd\(char)" - default: + case .any: self.source = mount.source } self.type = mount.type diff --git a/Sources/Containerization/LinuxPod.swift b/Sources/Containerization/LinuxPod.swift index a37ce062..f1dff0d5 100644 --- a/Sources/Containerization/LinuxPod.swift +++ 
b/Sources/Containerization/LinuxPod.swift @@ -59,6 +59,8 @@ public final class LinuxPod: Sendable { /// The default hosts file configuration for all containers in the pod. /// Individual containers can override this by setting their own `hosts` configuration. public var hosts: Hosts? + /// Volumes attached to the pod. Can be shared with multiple containers. + public var volumes: [PodVolume] = [] public init() {} } @@ -86,10 +88,70 @@ public final class LinuxPod: Sendable { /// Run the container with a minimal init process that handles signal /// forwarding and zombie reaping. public var useInit: Bool = false + /// The container mounts that references pod-level attached volumes. + public var volumeMounts: [VolumeMount] = [] public init() {} } + /// A volume that is attached at the pod level and can be shared by multiple containers. + public struct PodVolume: Sendable { + /// Describes the backing storage for the volume. + public enum Source: Sendable { + /// A network block device (NBD) volume. + case nbd(url: URL, timeout: TimeInterval? = nil, readOnly: Bool = false) + } + + /// The logical name of this volume. Containers reference this name in their `VolumeMount` entries. + public var name: String + /// The backing storage source for this volume. + public var source: Source + /// The filesystem format on the volume. + public var format: String + + public init(name: String, source: Source, format: String) { + self.name = name + self.source = source + self.format = format + } + + func toMount() -> Mount { + switch source { + case .nbd(let url, let timeout, let readOnly): + var runtimeOptions: [String] = [] + if let timeout { + runtimeOptions.append("vzTimeout=\(timeout)") + } + if readOnly { + runtimeOptions.append("vzForcedReadOnly=true") + } + return Mount.block( + format: self.format, + source: url.absoluteString, + destination: LinuxPod.guestVolumePath(name), + options: readOnly ? 
["ro"] : [], + runtimeOptions: runtimeOptions + ) + } + } + } + + /// A container mount that references a pod-level attached volume. + public struct VolumeMount: Sendable { + /// The name of the `PodVolume` to mount. + public var name: String + /// The destination path inside the container. + public var destination: String + /// Mount options (e.g. ["ro"]). + public var options: [String] + + public init(name: String, destination: String, options: [String] = []) { + self.name = name + self.destination = destination + self.options = options + } + } + private struct PodContainer: Sendable { let id: String let rootfs: Mount @@ -257,6 +319,10 @@ public final class LinuxPod: Sendable { private static func guestSocketStagingPath(_ containerID: String, socketID: String) -> String { "/run/container/\(containerID)/sockets/\(socketID).sock" } + + private static func guestVolumePath(_ volumeName: String) -> String { + "/run/volumes/\(volumeName)" + } } extension LinuxPod { @@ -332,6 +398,33 @@ extension LinuxPod { mountsByID[id] = [modifiedRootfs] + container.fileMountContext.transformedMounts } + // Validate pod volume names are unique. + var volumeNames = Set() + for volume in self.config.volumes { + guard volumeNames.insert(volume.name).inserted else { + throw ContainerizationError( + .invalidArgument, + message: "duplicate pod volume name \"\(volume.name)\"" + ) + } + } + + // Validate that all container volumeMounts reference valid pod volume names. 
+ for (id, container) in state.containers { + for volumeMount in container.config.volumeMounts { + guard volumeNames.contains(volumeMount.name) else { + throw ContainerizationError( + .invalidArgument, + message: "container \(id) references unknown pod volume \"\(volumeMount.name)\"" + ) + } + } + } + let podVolumeMounts = self.config.volumes.map { $0.toMount() } + if !podVolumeMounts.isEmpty { + mountsByID[self.id] = podVolumeMounts + } + let vmConfig = VMConfiguration( cpus: self.config.cpus, memoryInBytes: self.config.memoryInBytes, @@ -347,6 +440,7 @@ extension LinuxPod { do { let containers = state.containers + let volumes = self.config.volumes let shareProcessNamespace = self.config.shareProcessNamespace let pauseProcessHolder = Mutex(nil) let fileMountContextUpdates = Mutex<[String: FileMountContext]>([:]) @@ -429,6 +523,26 @@ extension LinuxPod { } } + // Mount pod-level volumes. + let podVolumeAttachments = vm.mounts[self.id] ?? [] + for (index, volume) in volumes.enumerated() { + guard index < podVolumeAttachments.count else { + throw ContainerizationError( + .notFound, + message: "attached filesystem not found for pod volume \"\(volume.name)\"" + ) + } + let attachment = podVolumeAttachments[index] + let guestPath = Self.guestVolumePath(volume.name) + try await agent.mount( + ContainerizationOCI.Mount( + type: volume.format, + source: attachment.source, + destination: guestPath, + options: [] + )) + } + // Start up unix socket relays for each container for (_, container) in containers { for socket in container.config.sockets { @@ -560,6 +674,17 @@ extension LinuxPod { )) } + // Bind mount pod volumes into the container. 
+ for volumeMount in container.config.volumeMounts { + mounts.append( + ContainerizationOCI.Mount( + type: "none", + source: Self.guestVolumePath(volumeMount.name), + destination: volumeMount.destination, + options: ["bind"] + volumeMount.options + )) + } + spec.mounts = cleanAndSortMounts(mounts) // Configure namespaces for the container @@ -719,6 +844,18 @@ extension LinuxPod { } } + // Unmount pod-level volumes. + if createdState.vm.state != .stopped && !self.config.volumes.isEmpty { + try? await createdState.vm.withAgent { agent in + for volume in self.config.volumes { + try await agent.umount( + path: Self.guestVolumePath(volume.name), + flags: 0 + ) + } + } + } + try await createdState.vm.stop() state.phase = .initialized } catch { diff --git a/Sources/Containerization/Mount.swift b/Sources/Containerization/Mount.swift index 951e4b12..b096ef24 100644 --- a/Sources/Containerization/Mount.swift +++ b/Sources/Containerization/Mount.swift @@ -134,10 +134,29 @@ public struct Mount: Sendable { #if os(macOS) extension Mount { + private enum StorageAttachmentType { + case diskImage + case networkBlockDevice + } + + private var storageAttachmentType: StorageAttachmentType { + let nbdSchemes = ["nbd://", "nbds://", "nbd+unix://", "nbds+unix://"] + if nbdSchemes.contains(where: { self.source.hasPrefix($0) }) { + return .networkBlockDevice + } + return .diskImage + } + func configure(config: inout VZVirtualMachineConfiguration) throws { switch self.runtimeOptions { case .virtioblk(let options): - let device = try VZDiskImageStorageDeviceAttachment.mountToVZAttachment(mount: self, options: options) + let device: VZStorageDeviceAttachment + switch self.storageAttachmentType { + case .networkBlockDevice: + device = try VZNetworkBlockDeviceStorageDeviceAttachment.mountToVZAttachment(mount: self, options: options) + case .diskImage: + device = try VZDiskImageStorageDeviceAttachment.mountToVZAttachment(mount: self, options: options) + } let attachment = 
VZVirtioBlockDeviceConfiguration(attachment: device) config.storageDevices.append(attachment) case .virtiofs(_): @@ -221,6 +240,68 @@ extension VZDiskImageStorageDeviceAttachment { } } +extension VZNetworkBlockDeviceStorageDeviceAttachment { + static func mountToVZAttachment(mount: Mount, options: [String]) throws -> VZNetworkBlockDeviceStorageDeviceAttachment { + guard let url = URL(string: mount.source) else { + throw ContainerizationError( + .invalidArgument, + message: "invalid NBD URL: \(mount.source)" + ) + } + + var timeout: TimeInterval = 5 + var synchronizationMode: VZDiskSynchronizationMode = .full + var forcedReadOnly = false + + for option in options { + let split = option.split(separator: "=") + if split.count != 2 { + continue + } + + let key = String(split[0]) + let value = String(split[1]) + + switch key { + case "vzTimeout": + guard let t = TimeInterval(value) else { + throw ContainerizationError( + .invalidArgument, + message: "invalid vzTimeout value for NBD device: \(value)" + ) + } + timeout = t + case "vzSynchronizationMode": + switch value { + case "full": + synchronizationMode = .full + case "none": + synchronizationMode = .none + default: + throw ContainerizationError( + .invalidArgument, + message: "unknown vzSynchronizationMode value for NBD device: \(value)" + ) + } + case "vzForcedReadOnly": + forcedReadOnly = value == "true" + default: + throw ContainerizationError( + .invalidArgument, + message: "unknown vmm option encountered: \(key)" + ) + } + } + + return try VZNetworkBlockDeviceStorageDeviceAttachment( + url: url, + timeout: timeout, + isForcedReadOnly: forcedReadOnly, + synchronizationMode: synchronizationMode + ) + } +} + #endif extension Mount { diff --git a/Sources/Integration/NBDServer.swift b/Sources/Integration/NBDServer.swift new file mode 100644 index 00000000..aed49ffd --- /dev/null +++ b/Sources/Integration/NBDServer.swift @@ -0,0 +1,381 @@ 
+//===----------------------------------------------------------------------===// +// Copyright © 2026 Apple Inc. and the Containerization project authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// + +import ContainerizationError +import Foundation +import Logging +import NIOCore +import NIOPosix + +/// A minimal NBD server for integration testing. +/// +/// Serves a file-backed block device using the NBD newstyle handshake protocol. +/// Supports both TCP and Unix domain socket transports. +final class NBDServer: Sendable { + private let channel: Channel + private let socketPath: String? + private let group: EventLoopGroup + let url: String + + init(filePath: String, socketPath: String, logger: Logger? = nil) throws { + self.socketPath = socketPath + self.group = MultiThreadedEventLoopGroup(numberOfThreads: 1) + + try? FileManager.default.removeItem(atPath: socketPath) + + self.channel = try Self.bootstrap(group: self.group, filePath: filePath, logger: logger) + .bind(unixDomainSocketPath: socketPath) + .wait() + self.url = "nbd+unix:///?socket=\(socketPath)" + } + + init(filePath: String, port: Int, logger: Logger? 
= nil) throws { + self.socketPath = nil + self.group = MultiThreadedEventLoopGroup(numberOfThreads: 1) + + self.channel = try Self.bootstrap(group: self.group, filePath: filePath, logger: logger) + .bind(host: "127.0.0.1", port: port) + .wait() + + guard let boundPort = channel.localAddress?.port, boundPort > 0 else { + throw ContainerizationError(.internalError, message: "NBD server failed to bind to a port") + } + self.url = "nbd://127.0.0.1:\(boundPort)" + } + + func stop() { + try? channel.close().wait() + try? group.syncShutdownGracefully() + if let socketPath { + try? FileManager.default.removeItem(atPath: socketPath) + } + } + + private static func bootstrap(group: EventLoopGroup, filePath: String, logger: Logger?) -> ServerBootstrap { + ServerBootstrap(group: group) + .serverChannelOption(.socketOption(.so_reuseaddr), value: 1) + .childChannelInitializer { channel in + channel.eventLoop.makeCompletedFuture { + try channel.pipeline.syncOperations.addHandler( + NBDConnectionHandler(filePath: filePath, logger: logger) + ) + } + } + } +} + +private final class NBDConnectionHandler: ChannelInboundHandler { + typealias InboundIn = ByteBuffer + typealias OutboundOut = ByteBuffer + + // Protocol constants + static let magic: UInt64 = 0x4e42_444d_4147_4943 + static let ihaveopt: UInt64 = 0x4948_4156_454f_5054 + static let replyMagic: UInt64 = 0x3_e889_0455_65a9 + static let requestMagic: UInt32 = 0x2560_9513 + static let simpleReplyMagic: UInt32 = 0x6744_6698 + + static let optExportName: UInt32 = 1 + static let optAbort: UInt32 = 2 + static let optInfo: UInt32 = 6 + static let optGo: UInt32 = 7 + + static let cmdRead: UInt16 = 0 + static let cmdWrite: UInt16 = 1 + static let cmdDisc: UInt16 = 2 + static let cmdFlush: UInt16 = 3 + + static let flagFixedNewstyle: UInt16 = 0x1 + static let flagNoZeroes: UInt16 = 0x2 + static let clientFlagFixedNewstyle: UInt32 = 0x1 + static let clientFlagNoZeroes: UInt32 = 0x2 + static let transmitHasFlags: UInt16 = 0x1 + static let 
transmitSendFlush: UInt16 = 0x4 + static let transmitSendFUA: UInt16 = 0x8 + + static let repACK: UInt32 = 1 + static let repInfo: UInt32 = 3 + static let repErrUnsup: UInt32 = 0x8000_0001 + static let infoExport: UInt16 = 0 + static let infoBlockSize: UInt16 = 3 + + // NBD error codes + static let errOK: UInt32 = 0 + static let errIO: UInt32 = 5 + static let errNotsup: UInt32 = 95 + + private let fileFD: Int32 + private let fileSize: UInt64 + private let logger: Logger? + private var buffer: ByteBuffer = ByteBuffer() + private var state: ConnectionState = .handshake + + private enum ConnectionState { + case handshake + case options(noZeroes: Bool) + case transmission + } + + init(filePath: String, logger: Logger?) { + self.fileFD = open(filePath, O_RDWR) + self.logger = logger + guard fileFD >= 0 else { + self.fileSize = 0 + logger?.error("NBD server: failed to open \(filePath), errno=\(errno)") + return + } + var st = stat() + if fstat(self.fileFD, &st) == 0 { + self.fileSize = UInt64(st.st_size) + } else { + self.fileSize = 0 + } + } + + func channelActive(context: ChannelHandlerContext) { + guard fileFD >= 0 else { + context.close(promise: nil) + return + } + // Send initial handshake. 
+ var buf = context.channel.allocator.buffer(capacity: 18) + buf.writeInteger(Self.magic) + buf.writeInteger(Self.ihaveopt) + buf.writeInteger(Self.flagFixedNewstyle | Self.flagNoZeroes) + context.writeAndFlush(wrapOutboundOut(buf), promise: nil) + } + + func channelInactive(context: ChannelHandlerContext) { + if fileFD >= 0 { + close(fileFD) + } + } + + func channelRead(context: ChannelHandlerContext, data: NIOAny) { + var incoming = unwrapInboundIn(data) + buffer.writeBuffer(&incoming) + processBuffer(context: context) + } + + private func processBuffer(context: ChannelHandlerContext) { + while true { + switch state { + case .handshake: + guard buffer.readableBytes >= 4, + let clientFlags = buffer.readInteger(as: UInt32.self) + else { + return + } + guard clientFlags & Self.clientFlagFixedNewstyle != 0 else { + context.close(promise: nil) + return + } + let noZeroes = clientFlags & Self.clientFlagNoZeroes != 0 + state = .options(noZeroes: noZeroes) + + case .options(let noZeroes): + guard buffer.readableBytes >= 16 else { + return + } + // Peek at the header without consuming. + let readerIndex = buffer.readerIndex + guard let magic = buffer.getInteger(at: readerIndex, as: UInt64.self), + let optType = buffer.getInteger(at: readerIndex + 8, as: UInt32.self), + let dataLen = buffer.getInteger(at: readerIndex + 12, as: UInt32.self) + else { + context.close(promise: nil) + return + } + + // Wait until we have the full option data. + guard buffer.readableBytes >= 16 + Int(dataLen) else { + return + } + // Consume the header. 
+ buffer.moveReaderIndex(forwardBy: 16) + + guard magic == Self.ihaveopt else { + context.close(promise: nil) + return + } + + let transmitFlags = Self.transmitHasFlags | Self.transmitSendFlush | Self.transmitSendFUA + + switch optType { + case Self.optExportName: + if dataLen > 0 { + buffer.moveReaderIndex(forwardBy: Int(dataLen)) + } + var reply = context.channel.allocator.buffer(capacity: 10) + reply.writeInteger(fileSize) + reply.writeInteger(transmitFlags) + if !noZeroes { + reply.writeRepeatingByte(0, count: 124) + } + context.writeAndFlush(wrapOutboundOut(reply), promise: nil) + state = .transmission + + case Self.optInfo, Self.optGo: + // Parse InfoRequest to check for block size request. + var requestedBlockSize = false + if dataLen >= 6 { + let optDataStart = buffer.readerIndex + let nameLen = Int(buffer.getInteger(at: optDataStart, as: UInt32.self) ?? 0) + let infoOffset = optDataStart + 4 + nameLen + if infoOffset + 2 <= optDataStart + Int(dataLen) { + let numReqs = Int(buffer.getInteger(at: infoOffset, as: UInt16.self) ?? 0) + for i in 0.. 0 { + buffer.moveReaderIndex(forwardBy: Int(dataLen)) + } + + // Send NBD_INFO_EXPORT reply. + var exportInfo = context.channel.allocator.buffer(capacity: 32) + writeOptReply(&exportInfo, optType: optType, replyType: Self.repInfo, dataLen: 12) + exportInfo.writeInteger(Self.infoExport) + exportInfo.writeInteger(fileSize) + exportInfo.writeInteger(transmitFlags) + + // Send NBD_INFO_BLOCK_SIZE if requested. 
+ if requestedBlockSize { + writeOptReply(&exportInfo, optType: optType, replyType: Self.repInfo, dataLen: 14) + exportInfo.writeInteger(Self.infoBlockSize) + exportInfo.writeInteger(UInt32(1)) // minimum + exportInfo.writeInteger(UInt32(4096)) // preferred + exportInfo.writeInteger(UInt32(4096 * 32)) // maximum + } + + writeOptReply(&exportInfo, optType: optType, replyType: Self.repACK, dataLen: 0) + context.writeAndFlush(wrapOutboundOut(exportInfo), promise: nil) + + if optType == Self.optGo { + state = .transmission + } + + case Self.optAbort: + if dataLen > 0 { + buffer.moveReaderIndex(forwardBy: Int(dataLen)) + } + context.close(promise: nil) + return + + default: + if dataLen > 0 { + buffer.moveReaderIndex(forwardBy: Int(dataLen)) + } + var reply = context.channel.allocator.buffer(capacity: 20) + writeOptReply(&reply, optType: optType, replyType: Self.repErrUnsup, dataLen: 0) + context.writeAndFlush(wrapOutboundOut(reply), promise: nil) + } + + case .transmission: + // Request header: 4 magic + 2 flags + 2 type + 8 cookie + 8 offset + 4 length = 28 + guard buffer.readableBytes >= 28 else { + return + } + let readerIndex = buffer.readerIndex + guard let magic = buffer.getInteger(at: readerIndex, as: UInt32.self), + let cmdType = buffer.getInteger(at: readerIndex + 6, as: UInt16.self), + let cookie = buffer.getInteger(at: readerIndex + 8, as: UInt64.self), + let offset = buffer.getInteger(at: readerIndex + 16, as: UInt64.self), + let length = buffer.getInteger(at: readerIndex + 24, as: UInt32.self) + else { + context.close(promise: nil) + return + } + guard magic == Self.requestMagic else { + context.close(promise: nil) + return + } + + switch cmdType { + case Self.cmdWrite: + // Need the full write payload before processing. 
+ guard buffer.readableBytes >= 28 + Int(length) else { + return + } + buffer.moveReaderIndex(forwardBy: 28) + var writeData = [UInt8](repeating: 0, count: Int(length)) + buffer.readWithUnsafeReadableBytes { ptr in + writeData.withUnsafeMutableBytes { dst in + guard let dstBase = dst.baseAddress, let srcBase = ptr.baseAddress else { + return + } + _ = memcpy(dstBase, srcBase, Int(length)) + } + return Int(length) + } + let n = pwrite(fileFD, &writeData, Int(length), off_t(offset)) + var reply = context.channel.allocator.buffer(capacity: 16) + writeSimpleReply(&reply, cookie: cookie, error: n < 0 ? Self.errIO : Self.errOK) + context.writeAndFlush(wrapOutboundOut(reply), promise: nil) + + case Self.cmdRead: + buffer.moveReaderIndex(forwardBy: 28) + var readBuf = [UInt8](repeating: 0, count: Int(length)) + let n = pread(fileFD, &readBuf, Int(length), off_t(offset)) + var reply = context.channel.allocator.buffer(capacity: 16 + Int(length)) + writeSimpleReply(&reply, cookie: cookie, error: n < 0 ? Self.errIO : Self.errOK) + if n >= 0 { + reply.writeBytes(readBuf[0.. Containerization.Mount { + let clonePath = Self.testDir.appending(component: "\(testID)-\(containerID).ext4").absolutePath() + try? FileManager.default.removeItem(atPath: clonePath) + return try rootfs.clone(to: clonePath) + } + + private func createEXT4DiskImage(testID: String, name: String, size: UInt64 = 64.mib()) throws -> URL { + let diskURL = Self.testDir.appending(component: "\(testID)-\(name).ext4") + try? 
FileManager.default.removeItem(at: diskURL) + let formatter = try EXT4.Formatter(FilePath(diskURL.absolutePath()), minDiskSize: size) + try formatter.close() + return diskURL + } + + private func createNBDServer(testID: String, name: String, size: UInt64 = 64.mib()) throws -> (NBDServer, URL) { + let diskURL = try createEXT4DiskImage(testID: testID, name: name, size: size) + let shortID = String(testID.hashValue, radix: 36, uppercase: false) + let socketPath = "/tmp/nbd-\(shortID)-\(name).sock" + let server = try NBDServer(filePath: diskURL.path, socketPath: socketPath) + return (server, diskURL) + } + + private func readFileFromDiskImage(_ diskURL: URL, path: String) throws -> String { + let reader = try EXT4.EXT4Reader(blockDevice: FilePath(diskURL.path)) + let bytes = try reader.readFile(at: FilePath(path)) + guard let content = String(bytes: bytes, encoding: .utf8) else { + throw IntegrationError.assert(msg: "failed to decode file content from disk image at \(path)") + } + return content.trimmingCharacters(in: .whitespacesAndNewlines) + } + + private func assertVirtioBlockMount(_ output: String, path: String) throws { + guard output.contains("/dev/vd") else { + throw IntegrationError.assert(msg: "expected virtio block device (/dev/vd*) for \(path), got: \(output)") + } + } + + func testContainerNBDMount() async throws { + let id = "test-container-nbd-mount" + let bs = try await bootstrap(id) + + let (server, diskURL) = try createNBDServer(testID: id, name: "vol") + defer { server.stop() } + + let buffer = BufferWriter() + let container = try LinuxContainer(id, rootfs: bs.rootfs, vmm: bs.vmm) { config in + config.mounts.append( + Mount.block( + format: "ext4", + source: server.url, + destination: "/data" + )) + config.process.arguments = [ + "/bin/sh", "-c", + "echo hello > /data/test.txt && cat /data/test.txt && grep /data /proc/mounts", + ] + config.process.stdout = buffer + config.bootLog = bs.bootLog + } + + try await container.create() + try await 
container.start() + + let status = try await container.wait() + try await container.stop() + + guard status.exitCode == 0 else { + throw IntegrationError.assert(msg: "container exited with status \(status)") + } + + let output = String(data: buffer.data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + let lines = output.components(separatedBy: "\n") + + guard lines.count >= 2 else { + throw IntegrationError.assert(msg: "expected at least 2 lines of output, got: \(output)") + } + + guard lines[0] == "hello" else { + throw IntegrationError.assert(msg: "expected 'hello', got '\(lines[0])'") + } + + try assertVirtioBlockMount(lines[1], path: "/data") + + // Verify the write landed on the NBD backing file. + let diskContent = try readFileFromDiskImage(diskURL, path: "/test.txt") + guard diskContent == "hello" else { + throw IntegrationError.assert(msg: "NBD backing file: expected 'hello', got '\(diskContent)'") + } + } + + func testContainerNBDReadOnly() async throws { + let id = "test-container-nbd-readonly" + let bs = try await bootstrap(id) + + let (server, _) = try createNBDServer(testID: id, name: "ro-vol") + defer { server.stop() } + + let buffer = BufferWriter() + let container = try LinuxContainer(id, rootfs: bs.rootfs, vmm: bs.vmm) { config in + config.mounts.append( + Mount.block( + format: "ext4", + source: server.url, + destination: "/data", + options: ["ro"], + runtimeOptions: ["vzForcedReadOnly=true"] + )) + // Verify virtio block mount, then attempt a write that should fail. + config.process.arguments = [ + "/bin/sh", "-c", + "grep /data /proc/mounts; echo test > /data/fail.txt 2>&1; echo exit=$?", + ] + config.process.stdout = buffer + config.bootLog = bs.bootLog + } + + try await container.create() + try await container.start() + + _ = try await container.wait() + try await container.stop() + + let output = String(data: buffer.data, encoding: .utf8) ?? 
"" + let lines = output.trimmingCharacters(in: .whitespacesAndNewlines).components(separatedBy: "\n") + + guard !lines.isEmpty else { + throw IntegrationError.assert(msg: "expected output, got nothing") + } + + // First line should show the virtio block device mount. + try assertVirtioBlockMount(lines[0], path: "/data") + + // Write should have failed on a read-only mount. + guard !output.contains("exit=0") else { + throw IntegrationError.assert(msg: "write succeeded on read-only NBD mount: \(output)") + } + } + + func testPodSharedNBDVolume() async throws { + let id = "test-pod-shared-nbd-volume" + let bs = try await bootstrap(id) + + let (server, diskURL) = try createNBDServer(testID: id, name: "shared") + defer { server.stop() } + + let rootfs1 = try cloneRootfsForNBD(bs.rootfs, testID: id, containerID: "writer") + let rootfs2 = try cloneRootfsForNBD(bs.rootfs, testID: id, containerID: "reader") + + let pod = try LinuxPod(id, vmm: bs.vmm) { config in + config.cpus = 4 + config.memoryInBytes = 1024.mib() + config.bootLog = bs.bootLog + config.volumes = [ + .init( + name: "shared-data", + source: .nbd(url: URL(string: server.url)!), + format: "ext4" + ) + ] + } + + // Container 1: writes to the shared volume and verifies mount type. + let writerBuffer = BufferWriter() + try await pod.addContainer("writer", rootfs: rootfs1) { config in + config.process.arguments = [ + "/bin/sh", "-c", + "echo shared-content > /data/shared.txt && grep /data /proc/mounts", + ] + config.process.stdout = writerBuffer + config.volumeMounts = [ + .init(name: "shared-data", destination: "/data") + ] + } + + // Container 2: reads from the same shared volume at a different path and verifies mount type. 
+ let readerBuffer = BufferWriter() + try await pod.addContainer("reader", rootfs: rootfs2) { config in + config.process.arguments = [ + "/bin/sh", "-c", + "sleep 2 && cat /shared/shared.txt && grep /shared /proc/mounts", + ] + config.process.stdout = readerBuffer + config.volumeMounts = [ + .init(name: "shared-data", destination: "/shared") + ] + } + + do { + try await pod.create() + try await pod.startContainer("writer") + try await pod.startContainer("reader") + + let writerStatus = try await pod.waitContainer("writer") + guard writerStatus.exitCode == 0 else { + throw IntegrationError.assert(msg: "writer exited with status \(writerStatus)") + } + + let readerStatus = try await pod.waitContainer("reader") + guard readerStatus.exitCode == 0 else { + throw IntegrationError.assert(msg: "reader exited with status \(readerStatus)") + } + + try await pod.stop() + } catch { + try? await pod.stop() + throw error + } + + // Verify writer output. + let writerOutput = String(data: writerBuffer.data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + let writerLines = writerOutput.components(separatedBy: "\n") + guard !writerLines.isEmpty else { + throw IntegrationError.assert(msg: "writer produced no output") + } + try assertVirtioBlockMount(writerLines.last!, path: "/data") + + // Verify reader output. + let readerOutput = String(data: readerBuffer.data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + let readerLines = readerOutput.components(separatedBy: "\n") + guard readerLines.count >= 2 else { + throw IntegrationError.assert(msg: "expected at least 2 lines from reader, got: \(readerOutput)") + } + guard readerLines[0] == "shared-content" else { + throw IntegrationError.assert(msg: "expected 'shared-content', got '\(readerLines[0])'") + } + try assertVirtioBlockMount(readerLines[1], path: "/shared") + + // Verify the write landed on the NBD backing file. 
+ let diskContent = try readFileFromDiskImage(diskURL, path: "/shared.txt") + guard diskContent == "shared-content" else { + throw IntegrationError.assert(msg: "NBD backing file: expected 'shared-content', got '\(diskContent)'") + } + } + + func testPodMultipleNBDVolumes() async throws { + let id = "test-pod-multiple-nbd-volumes" + let bs = try await bootstrap(id) + + let (server1, diskURL1) = try createNBDServer(testID: id, name: "vol1") + defer { server1.stop() } + + let (server2, diskURL2) = try createNBDServer(testID: id, name: "vol2") + defer { server2.stop() } + + let pod = try LinuxPod(id, vmm: bs.vmm) { config in + config.cpus = 4 + config.memoryInBytes = 1024.mib() + config.bootLog = bs.bootLog + config.volumes = [ + .init( + name: "volume-a", + source: .nbd(url: URL(string: server1.url)!), + format: "ext4" + ), + .init( + name: "volume-b", + source: .nbd(url: URL(string: server2.url)!), + format: "ext4" + ), + ] + } + + let buffer = BufferWriter() + try await pod.addContainer("container1", rootfs: bs.rootfs) { config in + config.process.arguments = [ + "/bin/sh", "-c", + """ + echo aaa > /mnt-a/a.txt && echo bbb > /mnt-b/b.txt \ + && cat /mnt-a/a.txt && cat /mnt-b/b.txt \ + && grep /mnt-a /proc/mounts && grep /mnt-b /proc/mounts + """, + ] + config.process.stdout = buffer + config.volumeMounts = [ + .init(name: "volume-a", destination: "/mnt-a"), + .init(name: "volume-b", destination: "/mnt-b"), + ] + } + + do { + try await pod.create() + try await pod.startContainer("container1") + + let status = try await pod.waitContainer("container1") + guard status.exitCode == 0 else { + throw IntegrationError.assert(msg: "container exited with status \(status)") + } + + try await pod.stop() + } catch { + try? await pod.stop() + throw error + } + + let output = String(data: buffer.data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) ?? 
"" + let lines = output.components(separatedBy: "\n") + + guard lines.count >= 4 else { + throw IntegrationError.assert(msg: "expected at least 4 lines, got: \(output)") + } + + guard lines[0] == "aaa" && lines[1] == "bbb" else { + throw IntegrationError.assert(msg: "expected 'aaa\\nbbb', got '\(lines[0])\\n\(lines[1])'") + } + + try assertVirtioBlockMount(lines[2], path: "/mnt-a") + try assertVirtioBlockMount(lines[3], path: "/mnt-b") + + // Verify each write landed on the correct NBD backing file. + let diskContent1 = try readFileFromDiskImage(diskURL1, path: "/a.txt") + guard diskContent1 == "aaa" else { + throw IntegrationError.assert(msg: "NBD backing file vol1: expected 'aaa', got '\(diskContent1)'") + } + let diskContent2 = try readFileFromDiskImage(diskURL2, path: "/b.txt") + guard diskContent2 == "bbb" else { + throw IntegrationError.assert(msg: "NBD backing file vol2: expected 'bbb', got '\(diskContent2)'") + } + } + + func testPodUnreferencedVolume() async throws { + let id = "test-pod-unreferenced-volume" + let bs = try await bootstrap(id) + + let (server, _) = try createNBDServer(testID: id, name: "unused") + defer { server.stop() } + + let pod = try LinuxPod(id, vmm: bs.vmm) { config in + config.cpus = 4 + config.memoryInBytes = 1024.mib() + config.bootLog = bs.bootLog + config.volumes = [ + .init( + name: "unused-vol", + source: .nbd(url: URL(string: server.url)!), + format: "ext4" + ) + ] + } + + // Container doesn't reference the volume at all. + try await pod.addContainer("container1", rootfs: bs.rootfs) { config in + config.process.arguments = ["/bin/true"] + } + + do { + try await pod.create() + try await pod.startContainer("container1") + + let status = try await pod.waitContainer("container1") + guard status.exitCode == 0 else { + throw IntegrationError.assert(msg: "container exited with status \(status)") + } + + try await pod.stop() + } catch { + try? 
await pod.stop() + throw error + } + } + + func testPodNBDVolumePersistence() async throws { + let id = "test-pod-nbd-volume-persistence" + let bs = try await bootstrap(id) + + let (server, _) = try createNBDServer(testID: id, name: "persistent") + defer { server.stop() } + + let rootfs1 = try cloneRootfsForNBD(bs.rootfs, testID: id, containerID: "writer") + let rootfs2 = try cloneRootfsForNBD(bs.rootfs, testID: id, containerID: "reader") + + let pod = try LinuxPod(id, vmm: bs.vmm) { config in + config.cpus = 4 + config.memoryInBytes = 1024.mib() + config.bootLog = bs.bootLog + config.volumes = [ + .init( + name: "persistent-data", + source: .nbd(url: URL(string: server.url)!), + format: "ext4" + ) + ] + } + + // First container: write data to the volume. + try await pod.addContainer("writer", rootfs: rootfs1) { config in + config.process.arguments = ["/bin/sh", "-c", "echo persisted > /data/file.txt && sync"] + config.volumeMounts = [ + .init(name: "persistent-data", destination: "/data") + ] + } + + // Second container: will read the data after the first is stopped. + let readerBuffer = BufferWriter() + try await pod.addContainer("reader", rootfs: rootfs2) { config in + config.process.arguments = ["/bin/sh", "-c", "cat /data/file.txt"] + config.process.stdout = readerBuffer + config.volumeMounts = [ + .init(name: "persistent-data", destination: "/data") + ] + } + + do { + try await pod.create() + + // Start writer, wait for it to finish, then stop it. + try await pod.startContainer("writer") + let writerStatus = try await pod.waitContainer("writer") + guard writerStatus.exitCode == 0 else { + throw IntegrationError.assert(msg: "writer exited with status \(writerStatus)") + } + try await pod.stopContainer("writer") + + // Start reader after writer is stopped — data should persist on the volume. 
+ try await pod.startContainer("reader") + let readerStatus = try await pod.waitContainer("reader") + guard readerStatus.exitCode == 0 else { + throw IntegrationError.assert(msg: "reader exited with status \(readerStatus)") + } + + try await pod.stop() + } catch { + try? await pod.stop() + throw error + } + + let output = String(data: readerBuffer.data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) + guard output == "persisted" else { + throw IntegrationError.assert(msg: "expected 'persisted', got '\(output ?? "")'") + } + } + + func testPodNBDConcurrentWrites() async throws { + let id = "test-pod-nbd-concurrent-writes" + let bs = try await bootstrap(id) + + let (server, _) = try createNBDServer(testID: id, name: "shared") + defer { server.stop() } + + let rootfs1 = try cloneRootfsForNBD(bs.rootfs, testID: id, containerID: "c1") + let rootfs2 = try cloneRootfsForNBD(bs.rootfs, testID: id, containerID: "c2") + + let pod = try LinuxPod(id, vmm: bs.vmm) { config in + config.cpus = 4 + config.memoryInBytes = 1024.mib() + config.bootLog = bs.bootLog + config.volumes = [ + .init( + name: "shared-vol", + source: .nbd(url: URL(string: server.url)!), + format: "ext4" + ) + ] + } + + // Both containers write to different files on the same volume concurrently. 
+ let buffer1 = BufferWriter() + try await pod.addContainer("c1", rootfs: rootfs1) { config in + config.process.arguments = [ + "/bin/sh", "-c", + "echo from-c1 > /vol/c1.txt && sync && cat /vol/c1.txt", + ] + config.process.stdout = buffer1 + config.volumeMounts = [ + .init(name: "shared-vol", destination: "/vol") + ] + } + + let buffer2 = BufferWriter() + try await pod.addContainer("c2", rootfs: rootfs2) { config in + config.process.arguments = [ + "/bin/sh", "-c", + "echo from-c2 > /vol/c2.txt && sync && cat /vol/c2.txt", + ] + config.process.stdout = buffer2 + config.volumeMounts = [ + .init(name: "shared-vol", destination: "/vol") + ] + } + + do { + try await pod.create() + try await pod.startContainer("c1") + try await pod.startContainer("c2") + + let status1 = try await pod.waitContainer("c1") + guard status1.exitCode == 0 else { + throw IntegrationError.assert(msg: "c1 exited with status \(status1)") + } + + let status2 = try await pod.waitContainer("c2") + guard status2.exitCode == 0 else { + throw IntegrationError.assert(msg: "c2 exited with status \(status2)") + } + + try await pod.stop() + } catch { + try? await pod.stop() + throw error + } + + let output1 = String(data: buffer1.data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) + guard output1 == "from-c1" else { + throw IntegrationError.assert(msg: "c1: expected 'from-c1', got '\(output1 ?? "")'") + } + + let output2 = String(data: buffer2.data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) + guard output2 == "from-c2" else { + throw IntegrationError.assert(msg: "c2: expected 'from-c2', got '\(output2 ?? 
"")'") + } + } +} diff --git a/Sources/Integration/PodTests.swift b/Sources/Integration/PodTests.swift index 9bd03192..0384d46e 100644 --- a/Sources/Integration/PodTests.swift +++ b/Sources/Integration/PodTests.swift @@ -2032,4 +2032,61 @@ extension IntegrationSuite { throw error } } + + func testPodInvalidVolumeReference() async throws { + let id = "test-pod-invalid-volume-ref" + let bs = try await bootstrap(id) + + let pod = try LinuxPod(id, vmm: bs.vmm) { config in + config.cpus = 4 + config.memoryInBytes = 1024.mib() + config.bootLog = bs.bootLog + } + + try await pod.addContainer("container1", rootfs: bs.rootfs) { config in + config.process.arguments = ["/bin/true"] + config.volumeMounts = [ + .init(name: "nonexistent-volume", destination: "/data") + ] + } + + do { + try await pod.create() + try? await pod.stop() + throw IntegrationError.assert(msg: "expected create() to fail for invalid volume reference") + } catch let error as ContainerizationError { + guard error.code == .invalidArgument else { + throw IntegrationError.assert(msg: "expected invalidArgument error, got: \(error)") + } + } + } + + func testPodDuplicateVolumeName() async throws { + let id = "test-pod-duplicate-volume-name" + let bs = try await bootstrap(id) + + let pod = try LinuxPod(id, vmm: bs.vmm) { config in + config.cpus = 4 + config.memoryInBytes = 1024.mib() + config.bootLog = bs.bootLog + config.volumes = [ + .init(name: "data", source: .nbd(url: URL(string: "nbd://localhost:10809")!), format: "ext4"), + .init(name: "data", source: .nbd(url: URL(string: "nbd://localhost:10809")!), format: "ext4"), + ] + } + + try await pod.addContainer("container1", rootfs: bs.rootfs) { config in + config.process.arguments = ["/bin/true"] + } + + do { + try await pod.create() + try? 
await pod.stop() + throw IntegrationError.assert(msg: "expected create() to fail for duplicate volume name") + } catch let error as ContainerizationError { + guard error.code == .invalidArgument else { + throw IntegrationError.assert(msg: "expected invalidArgument error, got: \(error)") + } + } + } } diff --git a/Sources/Integration/Suite.swift b/Sources/Integration/Suite.swift index 85341144..8ba4acaa 100644 --- a/Sources/Integration/Suite.swift +++ b/Sources/Integration/Suite.swift @@ -376,6 +376,8 @@ struct IntegrationSuite: AsyncParsableCommand { Test("container workingDir created", testWorkingDirCreated), Test("container workingDir exec created", testWorkingDirExecCreated), Test("container mount sort by depth", testMountsSortedByDepth), + Test("container NBD mount", testContainerNBDMount), + Test("container NBD read-only", testContainerNBDReadOnly), // Pods Test("pod single container", testPodSingleContainer), @@ -417,6 +419,13 @@ struct IntegrationSuite: AsyncParsableCommand { Test("pod unix socket into guest symlink", testPodUnixSocketIntoGuestSymlink), Test("pod sysctl", testPodSysctl), Test("pod sysctl multiple containers", testPodSysctlMultipleContainers), + Test("pod shared NBD volume", testPodSharedNBDVolume), + Test("pod multiple NBD volumes", testPodMultipleNBDVolumes), + Test("pod unreferenced NBD volume", testPodUnreferencedVolume), + Test("pod NBD volume persistence", testPodNBDVolumePersistence), + Test("pod NBD concurrent writes", testPodNBDConcurrentWrites), + Test("pod invalid volume reference", testPodInvalidVolumeReference), + Test("pod duplicate volume name", testPodDuplicateVolumeName), ] + macOS26Tests() let filteredTests: [Test] diff --git a/Tests/ContainerizationTests/MountTests.swift b/Tests/ContainerizationTests/MountTests.swift index ae8b17be..6ed7ae90 100644 --- a/Tests/ContainerizationTests/MountTests.swift +++ b/Tests/ContainerizationTests/MountTests.swift @@ -99,3 +99,194 @@ struct MountTests { #expect(sorted.map(\.destination) == 
["/", "/foo", "/tmp/bar/baz"]) } } + +@Suite("AttachedFilesystem runtimeOptions dispatch") +struct AttachedFilesystemTests { + + @Test func virtioblkMountAllocatesBlockDevice() throws { + let mount = Mount.block( + format: "ext4", + source: "/path/to/disk.img", + destination: "/data" + ) + let allocator = Character.blockDeviceTagAllocator() + let attached = try AttachedFilesystem(mount: mount, allocator: allocator) + + #expect(attached.source == "/dev/vda") + #expect(attached.type == "ext4") + #expect(attached.destination == "/data") + } + + @Test func nbdMountAllocatesBlockDevice() throws { + let mount = Mount.block( + format: "ext4", + source: "nbd://localhost:10809", + destination: "/data" + ) + let allocator = Character.blockDeviceTagAllocator() + let attached = try AttachedFilesystem(mount: mount, allocator: allocator) + + #expect(attached.source == "/dev/vda") + #expect(attached.type == "ext4") + #expect(attached.destination == "/data") + } + + @Test func nbdMountWithNonExt4FormatAllocatesBlockDevice() throws { + let mount = Mount.block( + format: "xfs", + source: "nbd://localhost:10809", + destination: "/data" + ) + let allocator = Character.blockDeviceTagAllocator() + let attached = try AttachedFilesystem(mount: mount, allocator: allocator) + + #expect(attached.source == "/dev/vda") + #expect(attached.type == "xfs") + } + + @Test func multipleBlockDevicesAllocateSequentially() throws { + let allocator = Character.blockDeviceTagAllocator() + + let m1 = Mount.block(format: "ext4", source: "/disk1.img", destination: "/a") + let m2 = Mount.block(format: "ext4", source: "nbd://host:10809", destination: "/b") + let m3 = Mount.block(format: "ext4", source: "/disk2.img", destination: "/c") + + let a1 = try AttachedFilesystem(mount: m1, allocator: allocator) + let a2 = try AttachedFilesystem(mount: m2, allocator: allocator) + let a3 = try AttachedFilesystem(mount: m3, allocator: allocator) + + #expect(a1.source == "/dev/vda") + #expect(a2.source == "/dev/vdb") + 
#expect(a3.source == "/dev/vdc") + } + + @Test func anyMountUsesSourceDirectly() throws { + let mount = Mount.any( + type: "tmpfs", + source: "tmpfs", + destination: "/tmp" + ) + let allocator = Character.blockDeviceTagAllocator() + let attached = try AttachedFilesystem(mount: mount, allocator: allocator) + + #expect(attached.source == "tmpfs") + } +} + +@Suite("PodVolume and VolumeMount types") +struct PodVolumeTests { + + @Test func podVolumeNBDSourceCreation() { + let volume = LinuxPod.PodVolume( + name: "shared-data", + source: .nbd(url: URL(string: "nbd://localhost:10809")!), + format: "ext4" + ) + + #expect(volume.name == "shared-data") + #expect(volume.format == "ext4") + if case .nbd(let url, let timeout, let readOnly) = volume.source { + #expect(url.absoluteString == "nbd://localhost:10809") + #expect(timeout == nil) + #expect(readOnly == false) + } else { + Issue.record("Expected .nbd source") + } + } + + @Test func podVolumeNBDSourceWithOptions() { + let volume = LinuxPod.PodVolume( + name: "data", + source: .nbd(url: URL(string: "nbd://host:10809")!, timeout: 30, readOnly: true), + format: "xfs" + ) + + if case .nbd(_, let timeout, let readOnly) = volume.source { + #expect(timeout == 30) + #expect(readOnly == true) + } else { + Issue.record("Expected .nbd source") + } + } + + @Test func podVolumeToMountConvertsCorrectly() { + let volume = LinuxPod.PodVolume( + name: "my-vol", + source: .nbd(url: URL(string: "nbd://host:10809/export")!), + format: "ext4" + ) + + let mount = volume.toMount() + + #expect(mount.source == "nbd://host:10809/export") + #expect(mount.destination == "/run/volumes/my-vol") + #expect(mount.type == "ext4") + #expect(mount.isBlock) + } + + @Test func podVolumeToMountWithReadOnlySetsOptions() { + let volume = LinuxPod.PodVolume( + name: "ro-vol", + source: .nbd(url: URL(string: "nbd://host:10809")!, readOnly: true), + format: "ext4" + ) + + let mount = volume.toMount() + + #expect(mount.options.contains("ro")) + if case 
.virtioblk(let opts) = mount.runtimeOptions { + #expect(opts.contains("vzForcedReadOnly=true")) + } else { + Issue.record("Expected virtioblk runtime options") + } + } + + @Test func podVolumeToMountWithTimeoutSetsRuntimeOption() { + let volume = LinuxPod.PodVolume( + name: "data", + source: .nbd(url: URL(string: "nbd://host:10809")!, timeout: 60), + format: "ext4" + ) + + let mount = volume.toMount() + + if case .virtioblk(let opts) = mount.runtimeOptions { + #expect(opts.contains("vzTimeout=60.0")) + } else { + Issue.record("Expected virtioblk runtime options") + } + } + + @Test func podVolumeToMountUsesMountType() { + let volume = LinuxPod.PodVolume( + name: "data", + source: .nbd(url: URL(string: "nbd://host:10809")!), + format: "xfs" + ) + + let mount = volume.toMount() + + #expect(mount.type == "xfs") + } + + @Test func volumeMountCreation() { + let vm = LinuxPod.VolumeMount( + name: "shared", + destination: "/data", + options: ["ro"] + ) + + #expect(vm.name == "shared") + #expect(vm.destination == "/data") + #expect(vm.options == ["ro"]) + } + + @Test func volumeMountDefaultOptions() { + let vm = LinuxPod.VolumeMount( + name: "data", + destination: "/mnt" + ) + + #expect(vm.options.isEmpty) + } +}