mirror of https://github.com/cirruslabs/tart.git
Merge branch 'cirruslabs:main' into main
This commit is contained in:
commit
9fccb59c8e
|
|
@ -1,7 +1,7 @@
|
|||
use_compute_credits: true
|
||||
|
||||
task:
|
||||
name: Test on Sequoia
|
||||
name: Test
|
||||
alias: test
|
||||
persistent_worker:
|
||||
labels:
|
||||
|
|
|
|||
|
|
@ -31,6 +31,9 @@ struct Clone: AsyncParsableCommand {
|
|||
@Flag(help: .hidden)
|
||||
var deduplicate: Bool = false
|
||||
|
||||
@Option(help: ArgumentHelp("limit automatic pruning to n gigabytes", valueName: "n"))
|
||||
var pruneLimit: UInt = 100
|
||||
|
||||
func validate() throws {
|
||||
if newName.contains("/") {
|
||||
throw ValidationError("<new-name> should be a local name")
|
||||
|
|
@ -42,8 +45,8 @@ struct Clone: AsyncParsableCommand {
|
|||
}
|
||||
|
||||
func run() async throws {
|
||||
let ociStorage = VMStorageOCI()
|
||||
let localStorage = VMStorageLocal()
|
||||
let ociStorage = try VMStorageOCI()
|
||||
let localStorage = try VMStorageLocal()
|
||||
|
||||
if let remoteName = try? RemoteName(sourceName), !ociStorage.exists(remoteName) {
|
||||
// Pull the VM in case it's OCI-based and doesn't exist locally yet
|
||||
|
|
@ -76,8 +79,10 @@ struct Clone: AsyncParsableCommand {
|
|||
//
|
||||
// So, once we clone the VM let's try to claim the rest of space for the VM to run without errors.
|
||||
let unallocatedBytes = try sourceVM.sizeBytes() - sourceVM.allocatedSizeBytes()
|
||||
if unallocatedBytes > 0 {
|
||||
try Prune.reclaimIfNeeded(UInt64(unallocatedBytes), sourceVM)
|
||||
// Avoid reclaiming an excessive amount of disk space.
|
||||
let reclaimBytes = min(unallocatedBytes, Int(pruneLimit) * 1024 * 1024 * 1024)
|
||||
if reclaimBytes > 0 {
|
||||
try Prune.reclaimIfNeeded(UInt64(reclaimBytes), sourceVM)
|
||||
}
|
||||
}, onCancel: {
|
||||
try? FileManager.default.removeItem(at: tmpVMDir.baseURL)
|
||||
|
|
|
|||
|
|
@ -87,11 +87,13 @@ struct Exec: AsyncParsableCommand {
|
|||
$0.args = Array(command.dropFirst(1))
|
||||
$0.interactive = interactive
|
||||
$0.tty = tty
|
||||
$0.terminalSize = .with {
|
||||
let (width, height) = try! Term.GetSize()
|
||||
if tty {
|
||||
$0.terminalSize = .with {
|
||||
let (width, height) = try! Term.GetSize()
|
||||
|
||||
$0.cols = UInt32(width)
|
||||
$0.rows = UInt32(height)
|
||||
$0.cols = UInt32(width)
|
||||
$0.rows = UInt32(height)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ struct Import: AsyncParsableCommand {
|
|||
}
|
||||
|
||||
func run() async throws {
|
||||
let localStorage = VMStorageLocal()
|
||||
let localStorage = try VMStorageLocal()
|
||||
|
||||
// Create a temporary VM directory to which we will load the export file
|
||||
let tmpVMDir = try VMDirectory.temporary()
|
||||
|
|
|
|||
|
|
@ -64,6 +64,8 @@ struct Login: AsyncParsableCommand {
|
|||
}
|
||||
|
||||
fileprivate class DictionaryCredentialsProvider: CredentialsProvider {
|
||||
let userFriendlyName = "static dictionary credentials provider"
|
||||
|
||||
var credentials: Dictionary<String, (String, String)>
|
||||
|
||||
init(_ credentials: Dictionary<String, (String, String)>) {
|
||||
|
|
|
|||
|
|
@ -53,9 +53,9 @@ struct Prune: AsyncParsableCommand {
|
|||
|
||||
switch entries {
|
||||
case "caches":
|
||||
prunableStorages = [VMStorageOCI(), try IPSWCache()]
|
||||
prunableStorages = [try VMStorageOCI(), try IPSWCache()]
|
||||
case "vms":
|
||||
prunableStorages = [VMStorageLocal()]
|
||||
prunableStorages = [try VMStorageLocal()]
|
||||
default:
|
||||
throw ValidationError("unsupported --entries value, please specify either \"caches\" or \"vms\"")
|
||||
}
|
||||
|
|
@ -152,7 +152,7 @@ struct Prune: AsyncParsableCommand {
|
|||
let transaction = SentrySDK.startTransaction(name: "Pruning cache", operation: "prune", bindToScope: true)
|
||||
defer { transaction.finish() }
|
||||
|
||||
let prunableStorages: [PrunableStorage] = [VMStorageOCI(), try IPSWCache()]
|
||||
let prunableStorages: [PrunableStorage] = [try VMStorageOCI(), try IPSWCache()]
|
||||
let prunables: [Prunable] = try prunableStorages
|
||||
.flatMap { try $0.prunables() }
|
||||
.sorted { try $0.accessDate() < $1.accessDate() }
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ struct Pull: AsyncParsableCommand {
|
|||
func run() async throws {
|
||||
// Be more liberal when accepting local image as argument,
|
||||
// see https://github.com/cirruslabs/tart/issues/36
|
||||
if VMStorageLocal().exists(remoteName) {
|
||||
if try VMStorageLocal().exists(remoteName) {
|
||||
print("\"\(remoteName)\" is a local image, nothing to pull here!")
|
||||
|
||||
return
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ struct Push: AsyncParsableCommand {
|
|||
var populateCache: Bool = false
|
||||
|
||||
func run() async throws {
|
||||
let ociStorage = VMStorageOCI()
|
||||
let ociStorage = try VMStorageOCI()
|
||||
let localVMDir = try VMStorageHelper.open(localName)
|
||||
let lock = try localVMDir.lock()
|
||||
if try !lock.trylock() {
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ struct Rename: AsyncParsableCommand {
|
|||
}
|
||||
|
||||
func run() async throws {
|
||||
let localStorage = VMStorageLocal()
|
||||
let localStorage = try VMStorageLocal()
|
||||
|
||||
if !localStorage.exists(name) {
|
||||
throw ValidationError("failed to rename a non-existent local VM: \(name)")
|
||||
|
|
|
|||
|
|
@ -149,11 +149,12 @@ struct Run: AsyncParsableCommand {
|
|||
|
||||
Learn how to create a disk image using Disk Utility here: https://support.apple.com/en-gb/guide/disk-utility/dskutl11888/mac
|
||||
|
||||
To work with block devices, the easiest way is to modify their permissions (e.g. by using "sudo chown $USER /dev/diskX") or to run the Tart binary as root, which affects locating Tart VMs.
|
||||
To work with block devices, the easiest way is to modify their permissions to be accessible to the current user:
|
||||
|
||||
To work around this pass TART_HOME explicitly:
|
||||
sudo chown $USER /dev/diskX
|
||||
tart run sequoia --disk=/dev/diskX
|
||||
|
||||
sudo TART_HOME="$HOME/.tart" tart run sequoia --disk=/dev/disk0
|
||||
Warning: after running the chown command above, all software running under the current user will be able to access /dev/diskX. If that violates your threat model, we recommend avoiding mounting block devices altogether.
|
||||
""", valueName: "path[:options]"), completion: .file())
|
||||
var disk: [String] = []
|
||||
|
||||
|
|
@ -216,15 +217,28 @@ struct Run: AsyncParsableCommand {
|
|||
"""))
|
||||
var netSoftnet: Bool = false
|
||||
|
||||
@Option(help: ArgumentHelp("Comma-separated list of CIDRs to allow the traffic to when using Softnet isolation\n(e.g. --net-softnet-allow=192.168.0.0/24)", discussion: """
|
||||
@Option(help: ArgumentHelp("Comma-separated list of CIDRs to allow the traffic to when using Softnet isolation (e.g. --net-softnet-allow=192.168.0.0/24)", discussion: """
|
||||
This option allows you bypass the private IPv4 address space restrictions imposed by --net-softnet.
|
||||
|
||||
For example, you can allow the VM to communicate with the local network with e.g. --net-softnet-allow=10.0.0.0/16 or to completely disable the destination based restrictions with --net-softnet-allow=0.0.0.0/0.
|
||||
For example, you can allow the VM to communicate with the local network with e.g. --net-softnet-allow=10.0.0.0/16 or with --net-softnet-allow=0.0.0.0/0 to completely disable the destination based restrictions, including VMs bridge isolation.
|
||||
|
||||
When used with --net-softnet-block, the longest prefix match always wins. In case the same prefix is both allowed and blocked, blocking takes precedence.
|
||||
|
||||
Implies --net-softnet.
|
||||
""", valueName: "comma-separated CIDRs"))
|
||||
var netSoftnetAllow: String?
|
||||
|
||||
@Option(help: ArgumentHelp("Comma-separated list of CIDRs to block the traffic to when using Softnet isolation (e.g. --net-softnet-block=66.66.0.0/16)", discussion: """
|
||||
This option allows you to tighten the IPv4 address space restrictions imposed by --net-softnet even further.
|
||||
|
||||
For example --net-softnet-block=0.0.0.0/0 may be used to establish a default deny policy that is further relaxed with --net-softnet-allow.
|
||||
|
||||
When used with --net-softnet-allow, the longest prefix match always wins. In case the same prefix is both allowed and blocked, blocking takes precedence.
|
||||
|
||||
Implies --net-softnet.
|
||||
""", valueName: "comma-separated CIDRs"))
|
||||
var netSoftnetBlock: String?
|
||||
|
||||
@Option(help: ArgumentHelp("Comma-separated list of TCP ports to expose (e.g. --net-softnet-expose 2222:22,8080:80)", discussion: """
|
||||
Options are comma-separated and are as follows:
|
||||
|
||||
|
|
@ -280,13 +294,19 @@ struct Run: AsyncParsableCommand {
|
|||
#endif
|
||||
var noTrackpad: Bool = false
|
||||
|
||||
@Flag(help: ArgumentHelp("Disable the pointer"))
|
||||
var noPointer: Bool = false
|
||||
|
||||
@Flag(help: ArgumentHelp("Disable the keyboard"))
|
||||
var noKeyboard: Bool = false
|
||||
|
||||
mutating func validate() throws {
|
||||
if vnc && vncExperimental {
|
||||
throw ValidationError("--vnc and --vnc-experimental are mutually exclusive")
|
||||
}
|
||||
|
||||
// Automatically enable --net-softnet when any of its related options are specified
|
||||
if netSoftnetAllow != nil || netSoftnetExpose != nil {
|
||||
if netSoftnetAllow != nil || netSoftnetBlock != nil || netSoftnetExpose != nil {
|
||||
netSoftnet = true
|
||||
}
|
||||
|
||||
|
|
@ -316,7 +336,7 @@ struct Run: AsyncParsableCommand {
|
|||
}
|
||||
}
|
||||
|
||||
let localStorage = VMStorageLocal()
|
||||
let localStorage = try VMStorageLocal()
|
||||
let vmDir = try localStorage.open(name)
|
||||
if try vmDir.state() == .Suspended {
|
||||
suspendable = true
|
||||
|
|
@ -331,8 +351,15 @@ struct Run: AsyncParsableCommand {
|
|||
if noTrackpad {
|
||||
throw ValidationError("--no-trackpad cannot be used with --suspendable")
|
||||
}
|
||||
if noKeyboard {
|
||||
throw ValidationError("--no-keyboard cannot be used with --suspendable")
|
||||
}
|
||||
if noPointer {
|
||||
throw ValidationError("--no-pointer cannot be used with --suspendable")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if noTrackpad {
|
||||
let config = try VMConfig.init(fromURL: vmDir.configURL)
|
||||
if config.os != .darwin {
|
||||
|
|
@ -349,7 +376,7 @@ struct Run: AsyncParsableCommand {
|
|||
|
||||
@MainActor
|
||||
func run() async throws {
|
||||
let localStorage = VMStorageLocal()
|
||||
let localStorage = try VMStorageLocal()
|
||||
let vmDir = try localStorage.open(name)
|
||||
|
||||
// Validate disk format support
|
||||
|
|
@ -408,7 +435,9 @@ struct Run: AsyncParsableCommand {
|
|||
clipboard: !noClipboard,
|
||||
sync: VZDiskImageSynchronizationMode(diskOptions.syncModeRaw),
|
||||
caching: VZDiskImageCachingMode(diskOptions.cachingModeRaw),
|
||||
noTrackpad: noTrackpad
|
||||
noTrackpad: noTrackpad,
|
||||
noPointer: noPointer,
|
||||
noKeyboard: noKeyboard
|
||||
)
|
||||
|
||||
let vncImpl: VNC? = try {
|
||||
|
|
@ -609,6 +638,10 @@ struct Run: AsyncParsableCommand {
|
|||
softnetExtraArguments += ["--allow", netSoftnetAllow]
|
||||
}
|
||||
|
||||
if let netSoftnetBlock = netSoftnetBlock {
|
||||
softnetExtraArguments += ["--block", netSoftnetBlock]
|
||||
}
|
||||
|
||||
if let netSoftnetExpose = netSoftnetExpose {
|
||||
softnetExtraArguments += ["--expose", netSoftnetExpose]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ struct Set: AsyncParsableCommand {
|
|||
@Option(help: "VM memory size in megabytes")
|
||||
var memory: UInt64?
|
||||
|
||||
@Option(help: "VM display resolution in a format of <width>x<height>. For example, 1200x800")
|
||||
@Option(help: "VM display resolution in a format of WIDTHxHEIGHT[pt|px]. For example, 1200x800, 1200x800pt or 1920x1080px. Units are treated as hints and default to \"pt\" (points) for macOS VMs and \"px\" (pixels) for Linux VMs when not specified.")
|
||||
var display: VMDisplayConfig?
|
||||
|
||||
@Flag(inversion: .prefixedNo, help: ArgumentHelp("Whether to automatically reconfigure the VM's display to fit the window"))
|
||||
|
|
@ -59,6 +59,7 @@ struct Set: AsyncParsableCommand {
|
|||
if (display.height > 0) {
|
||||
vmConfig.display.height = display.height
|
||||
}
|
||||
vmConfig.display.unit = display.unit
|
||||
}
|
||||
|
||||
vmConfig.displayRefit = displayRefit
|
||||
|
|
@ -95,12 +96,24 @@ struct Set: AsyncParsableCommand {
|
|||
|
||||
extension VMDisplayConfig: ExpressibleByArgument {
|
||||
public init(argument: String) {
|
||||
var argument = argument
|
||||
var unit: Unit? = nil
|
||||
|
||||
if argument.hasSuffix(Unit.pixel.rawValue) {
|
||||
argument = String(argument.dropLast(Unit.pixel.rawValue.count))
|
||||
unit = Unit.pixel
|
||||
} else if argument.hasSuffix(Unit.point.rawValue) {
|
||||
argument = String(argument.dropLast(Unit.point.rawValue.count))
|
||||
unit = Unit.point
|
||||
}
|
||||
|
||||
let parts = argument.components(separatedBy: "x").map {
|
||||
Int($0) ?? 0
|
||||
}
|
||||
self = VMDisplayConfig(
|
||||
width: parts[safe: 0] ?? 0,
|
||||
height: parts[safe: 1] ?? 0
|
||||
height: parts[safe: 1] ?? 0,
|
||||
unit: unit,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,7 +9,8 @@ struct Config {
|
|||
var tartHomeDir: URL
|
||||
|
||||
if let customTartHome = ProcessInfo.processInfo.environment["TART_HOME"] {
|
||||
tartHomeDir = URL(fileURLWithPath: customTartHome)
|
||||
tartHomeDir = URL(fileURLWithPath: customTartHome, isDirectory: true)
|
||||
try Self.validateTartHome(url: tartHomeDir)
|
||||
} else {
|
||||
tartHomeDir = FileManager.default
|
||||
.homeDirectoryForCurrentUser
|
||||
|
|
@ -49,4 +50,24 @@ struct Config {
|
|||
static func jsonDecoder() -> JSONDecoder {
|
||||
JSONDecoder()
|
||||
}
|
||||
|
||||
private static func validateTartHome(url: URL) throws {
|
||||
let urlComponents = url.pathComponents
|
||||
|
||||
let descendingURLs = urlComponents.indices.map { i in
|
||||
URL(fileURLWithPath: urlComponents[0...i].joined(separator: "/"))
|
||||
}
|
||||
|
||||
for descendingURL in descendingURLs {
|
||||
if FileManager.default.fileExists(atPath: descendingURL.path) {
|
||||
continue
|
||||
}
|
||||
|
||||
do {
|
||||
try FileManager.default.createDirectory(at: descendingURL, withIntermediateDirectories: false)
|
||||
} catch {
|
||||
throw RuntimeError.Generic("TART_HOME is invalid: \(descendingURL.path) does not exist, yet we can't create it: \(error.localizedDescription)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ enum CredentialsProviderError: Error {
|
|||
}
|
||||
|
||||
protocol CredentialsProvider {
|
||||
var userFriendlyName: String { get }
|
||||
func retrieve(host: String) throws -> (String, String)?
|
||||
func store(host: String, user: String, password: String) throws
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import Foundation
|
||||
|
||||
class DockerConfigCredentialsProvider: CredentialsProvider {
|
||||
let userFriendlyName = "Docker configuration credentials provider"
|
||||
|
||||
func retrieve(host: String) throws -> (String, String)? {
|
||||
let dockerConfigURL = FileManager.default.homeDirectoryForCurrentUser.appendingPathComponent(".docker").appendingPathComponent("config.json")
|
||||
if !FileManager.default.fileExists(atPath: dockerConfigURL.path) {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import Foundation
|
||||
|
||||
class EnvironmentCredentialsProvider: CredentialsProvider {
|
||||
let userFriendlyName = "environment variable credentials provider"
|
||||
|
||||
func retrieve(host: String) throws -> (String, String)? {
|
||||
if let tartRegistryHostname = ProcessInfo.processInfo.environment["TART_REGISTRY_HOSTNAME"],
|
||||
tartRegistryHostname != host {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import Foundation
|
||||
|
||||
class KeychainCredentialsProvider: CredentialsProvider {
|
||||
let userFriendlyName = "Keychain credentials provider"
|
||||
|
||||
func retrieve(host: String) throws -> (String, String)? {
|
||||
let query: [String: Any] = [kSecClass as String: kSecClassInternetPassword,
|
||||
kSecAttrProtocol as String: kSecAttrProtocolHTTPS,
|
||||
|
|
|
|||
|
|
@ -6,6 +6,8 @@ enum StdinCredentialsError: Error {
|
|||
}
|
||||
|
||||
class StdinCredentials {
|
||||
let userFriendlyName = "standard input credentials provider"
|
||||
|
||||
static func retrieve() throws -> (String, String) {
|
||||
let user = try readStdinCredential(name: "username", prompt: "User: ", isSensitive: false)
|
||||
let password = try readStdinCredential(name: "password", prompt: "Password: ", isSensitive: true)
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ enum DiskImageFormat: String, CaseIterable, Codable {
|
|||
case .raw:
|
||||
return true
|
||||
case .asif:
|
||||
if #available(macOS 15, *) {
|
||||
if #available(macOS 26, *) {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
|
|
|
|||
|
|
@ -4,18 +4,28 @@ public class ProgressObserver: NSObject {
|
|||
@objc var progressToObserve: Progress
|
||||
var observation: NSKeyValueObservation?
|
||||
var lastTimeUpdated = Date.now
|
||||
private var lastRenderedLine: String?
|
||||
|
||||
public init(_ progress: Progress) {
|
||||
progressToObserve = progress
|
||||
}
|
||||
|
||||
func log(_ renderer: Logger) {
|
||||
renderer.appendNewLine(ProgressObserver.lineToRender(progressToObserve))
|
||||
let initialLine = ProgressObserver.lineToRender(progressToObserve)
|
||||
renderer.appendNewLine(initialLine)
|
||||
lastRenderedLine = initialLine
|
||||
observation = observe(\.progressToObserve.fractionCompleted) { progress, _ in
|
||||
let currentTime = Date.now
|
||||
if self.progressToObserve.isFinished || currentTime.timeIntervalSince(self.lastTimeUpdated) >= 1.0 {
|
||||
self.lastTimeUpdated = currentTime
|
||||
renderer.updateLastLine(ProgressObserver.lineToRender(self.progressToObserve))
|
||||
let line = ProgressObserver.lineToRender(self.progressToObserve)
|
||||
// Skip identical renders so non-interactive logs only see new percent values.
|
||||
if line == self.lastRenderedLine {
|
||||
return
|
||||
}
|
||||
|
||||
self.lastRenderedLine = line
|
||||
renderer.updateLastLine(line)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -429,8 +429,12 @@ class Registry {
|
|||
}
|
||||
|
||||
for provider in credentialsProviders {
|
||||
if let (user, password) = try provider.retrieve(host: host) {
|
||||
return (user, password)
|
||||
do {
|
||||
if let (user, password) = try provider.retrieve(host: host) {
|
||||
return (user, password)
|
||||
}
|
||||
} catch (let e) {
|
||||
print("Failed to retrieve credentials using \(provider.userFriendlyName), authentication may fail: \(e)")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ struct UnsupportedHostOSError: Error, CustomStringConvertible {
|
|||
func graphicsDevice(vmConfig: VMConfig) -> VZGraphicsDeviceConfiguration {
|
||||
let result = VZMacGraphicsDeviceConfiguration()
|
||||
|
||||
if let hostMainScreen = NSScreen.main {
|
||||
if (vmConfig.display.unit ?? .point) == .point, let hostMainScreen = NSScreen.main {
|
||||
let vmScreenSize = NSSize(width: vmConfig.display.width, height: vmConfig.display.height)
|
||||
result.displays = [
|
||||
VZMacGraphicsDisplayConfiguration(for: hostMainScreen, sizeInPoints: vmScreenSize)
|
||||
|
|
|
|||
|
|
@ -70,20 +70,30 @@ struct Root: AsyncParsableCommand {
|
|||
HttpStatusCodeRange(min: 400, max: 400),
|
||||
HttpStatusCodeRange(min: 402, max: 599)
|
||||
]
|
||||
|
||||
// https://github.com/cirruslabs/tart/issues/1163
|
||||
options.enableAppLaunchProfiling = false
|
||||
options.configureProfiling = {
|
||||
$0.profileAppStarts = false
|
||||
}
|
||||
}
|
||||
|
||||
SentrySDK.configureScope { scope in
|
||||
scope.setExtra(value: ProcessInfo.processInfo.arguments, key: "Command-line arguments")
|
||||
}
|
||||
|
||||
// Enrich future events with Cirrus CI-specific tags
|
||||
if let tags = ProcessInfo.processInfo.environment["CIRRUS_SENTRY_TAGS"] {
|
||||
SentrySDK.configureScope { scope in
|
||||
for (key, value) in tags.split(separator: ",").compactMap({ parseCirrusSentryTag($0) }) {
|
||||
scope.setTag(value: value, key: key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
defer { SentrySDK.flush(timeout: 2.seconds.timeInterval) }
|
||||
|
||||
SentrySDK.configureScope { scope in
|
||||
scope.setExtra(value: ProcessInfo.processInfo.arguments, key: "Command-line arguments")
|
||||
}
|
||||
|
||||
// Enrich future events with Cirrus CI-specific tags
|
||||
if let tags = ProcessInfo.processInfo.environment["CIRRUS_SENTRY_TAGS"] {
|
||||
SentrySDK.configureScope { scope in
|
||||
for (key, value) in tags.split(separator: ",").compactMap({ parseCirrusSentryTag($0) }) {
|
||||
scope.setTag(value: value, key: key)
|
||||
}
|
||||
defer {
|
||||
if ProcessInfo.processInfo.environment["SENTRY_DSN"] != nil {
|
||||
SentrySDK.flush(timeout: 2.seconds.timeInterval)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -92,7 +102,7 @@ struct Root: AsyncParsableCommand {
|
|||
do {
|
||||
try Config().gc()
|
||||
} catch {
|
||||
fputs("Failed to perform garbage collection!\n\(error)\n", stderr)
|
||||
fputs("Failed to perform garbage collection: \(error)\n", stderr)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -109,8 +119,10 @@ struct Root: AsyncParsableCommand {
|
|||
}
|
||||
|
||||
// Capture the error into Sentry
|
||||
SentrySDK.capture(error: error)
|
||||
SentrySDK.flush(timeout: 2.seconds.timeInterval)
|
||||
if ProcessInfo.processInfo.environment["SENTRY_DSN"] != nil {
|
||||
SentrySDK.capture(error: error)
|
||||
SentrySDK.flush(timeout: 2.seconds.timeInterval)
|
||||
}
|
||||
|
||||
// Handle a non-ArgumentParser's exception that requires a specific exit code to be set
|
||||
if let errorWithExitCode = error as? HasExitCode {
|
||||
|
|
|
|||
|
|
@ -51,7 +51,9 @@ class VM: NSObject, VZVirtualMachineDelegate, ObservableObject {
|
|||
clipboard: Bool = true,
|
||||
sync: VZDiskImageSynchronizationMode = .full,
|
||||
caching: VZDiskImageCachingMode? = nil,
|
||||
noTrackpad: Bool = false
|
||||
noTrackpad: Bool = false,
|
||||
noPointer: Bool = false,
|
||||
noKeyboard: Bool = false
|
||||
) throws {
|
||||
name = vmDir.name
|
||||
config = try VMConfig.init(fromURL: vmDir.configURL)
|
||||
|
|
@ -73,7 +75,9 @@ class VM: NSObject, VZVirtualMachineDelegate, ObservableObject {
|
|||
clipboard: clipboard,
|
||||
sync: sync,
|
||||
caching: caching,
|
||||
noTrackpad: noTrackpad
|
||||
noTrackpad: noTrackpad,
|
||||
noPointer: noPointer,
|
||||
noKeyboard: noKeyboard
|
||||
)
|
||||
virtualMachine = VZVirtualMachine(configuration: configuration)
|
||||
|
||||
|
|
@ -316,7 +320,9 @@ class VM: NSObject, VZVirtualMachineDelegate, ObservableObject {
|
|||
clipboard: Bool = true,
|
||||
sync: VZDiskImageSynchronizationMode = .full,
|
||||
caching: VZDiskImageCachingMode? = nil,
|
||||
noTrackpad: Bool = false
|
||||
noTrackpad: Bool = false,
|
||||
noPointer: Bool = false,
|
||||
noKeyboard: Bool = false
|
||||
) throws -> VZVirtualMachineConfiguration {
|
||||
let configuration = VZVirtualMachineConfiguration()
|
||||
|
||||
|
|
@ -356,8 +362,16 @@ class VM: NSObject, VZVirtualMachineDelegate, ObservableObject {
|
|||
configuration.keyboards = platformSuspendable.keyboardsSuspendable()
|
||||
configuration.pointingDevices = platformSuspendable.pointingDevicesSuspendable()
|
||||
} else {
|
||||
configuration.keyboards = vmConfig.platform.keyboards()
|
||||
if noTrackpad {
|
||||
|
||||
if noKeyboard {
|
||||
configuration.keyboards = []
|
||||
} else {
|
||||
configuration.keyboards = vmConfig.platform.keyboards()
|
||||
}
|
||||
|
||||
if noPointer {
|
||||
configuration.pointingDevices = []
|
||||
} else if noTrackpad {
|
||||
configuration.pointingDevices = vmConfig.platform.pointingDevicesSimplified()
|
||||
} else {
|
||||
configuration.pointingDevices = vmConfig.platform.pointingDevices()
|
||||
|
|
|
|||
|
|
@ -33,14 +33,24 @@ enum CodingKeys: String, CodingKey {
|
|||
case hardwareModel
|
||||
}
|
||||
|
||||
struct VMDisplayConfig: Codable {
|
||||
struct VMDisplayConfig: Codable, Equatable {
|
||||
enum Unit: String, Codable {
|
||||
case point = "pt"
|
||||
case pixel = "px"
|
||||
}
|
||||
|
||||
var width: Int = 1024
|
||||
var height: Int = 768
|
||||
var unit: Unit?
|
||||
}
|
||||
|
||||
extension VMDisplayConfig: CustomStringConvertible {
|
||||
var description: String {
|
||||
"\(width)x\(height)"
|
||||
if let unit {
|
||||
"\(width)x\(height)\(unit.rawValue)"
|
||||
} else {
|
||||
"\(width)x\(height)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,11 @@
|
|||
import Foundation
|
||||
|
||||
class VMStorageLocal: PrunableStorage {
|
||||
let baseURL: URL = try! Config().tartHomeDir.appendingPathComponent("vms", isDirectory: true)
|
||||
let baseURL: URL
|
||||
|
||||
init() throws {
|
||||
baseURL = try Config().tartHomeDir.appendingPathComponent("vms", isDirectory: true)
|
||||
}
|
||||
|
||||
private func vmURL(_ name: String) -> URL {
|
||||
baseURL.appendingPathComponent(name, isDirectory: true)
|
||||
|
|
|
|||
|
|
@ -3,7 +3,11 @@ import Sentry
|
|||
import Retry
|
||||
|
||||
class VMStorageOCI: PrunableStorage {
|
||||
let baseURL = try! Config().tartCacheDir.appendingPathComponent("OCIs", isDirectory: true)
|
||||
let baseURL: URL
|
||||
|
||||
init() throws {
|
||||
baseURL = try Config().tartCacheDir.appendingPathComponent("OCIs", isDirectory: true)
|
||||
}
|
||||
|
||||
private func vmURL(_ name: RemoteName) -> URL {
|
||||
baseURL.appendingRemoteName(name)
|
||||
|
|
|
|||
|
|
@ -2,13 +2,9 @@ import XCTest
|
|||
@testable import tart
|
||||
|
||||
final class DiskImageFormatTests: XCTestCase {
|
||||
func testRawFormatIsAlwaysSupported() throws {
|
||||
XCTAssertTrue(DiskImageFormat.raw.isSupported)
|
||||
}
|
||||
|
||||
func testASIFFormatSupport() throws {
|
||||
// ASIF should be supported on macOS 15+
|
||||
if #available(macOS 15, *) {
|
||||
// ASIF should be supported on macOS 26+
|
||||
if #available(macOS 26, *) {
|
||||
XCTAssertTrue(DiskImageFormat.asif.isSupported)
|
||||
} else {
|
||||
XCTAssertFalse(DiskImageFormat.asif.isSupported)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
import XCTest
|
||||
@testable import tart
|
||||
|
||||
final class VMConfigTests: XCTestCase {
|
||||
func testVMDisplayConfig() throws {
|
||||
// Defaults units (points)
|
||||
var vmDisplayConfig = VMDisplayConfig.init(argument: "1234x5678")
|
||||
XCTAssertEqual(VMDisplayConfig(width: 1234, height: 5678, unit: nil), vmDisplayConfig)
|
||||
|
||||
// Explicit units (points)
|
||||
vmDisplayConfig = VMDisplayConfig.init(argument: "1234x5678pt")
|
||||
XCTAssertEqual(VMDisplayConfig(width: 1234, height: 5678, unit: .point), vmDisplayConfig)
|
||||
|
||||
// Explicit units (pixels)
|
||||
vmDisplayConfig = VMDisplayConfig.init(argument: "1234x5678px")
|
||||
XCTAssertEqual(VMDisplayConfig(width: 1234, height: 5678, unit: .pixel), vmDisplayConfig)
|
||||
}
|
||||
}
|
||||
|
|
@ -11,3 +11,4 @@
|
|||
"MD045": false # OK not to have a description for an image
|
||||
"MD046": false # Code block style [Expected: fenced; Actual: indented]
|
||||
"MD059": false # It's OK to have "here" links
|
||||
"MD051": false # MkDocs generates "#-no-pki" anchors, but markdownlint expects "#--no-pki" anchors
|
||||
|
|
|
|||
|
|
@ -60,9 +60,9 @@ device without a physical display connected. For example, a Mac Mini with a HDMI
|
|||
but a Mac Mini on a desk with a connected physical display is considered a personal computer. **Usage on personal computers
|
||||
and before reaching the 100 CPU cores limit is royalty-free and does not have the viral properties of AGPL.**
|
||||
|
||||
When an organization surpasses the 100 CPU cores limit, they will be required to obtain a [Gold Tier License](/licensing#license-tiers),
|
||||
which costs \$1000 per month. Upon reaching a limit of 500 CPU cores, a [Platinum Tier License](/licensing#license-tiers)
|
||||
(\$3000 per month) will be required, and for organizations that exceed 3000 CPU cores, a custom [Diamond Tier License](/licensing#license-tiers)
|
||||
When an organization surpasses the 100 CPU cores limit, they will be required to obtain a [Gold Tier License](../../licensing.md#license-tiers),
|
||||
which costs \$1000 per month. Upon reaching a limit of 500 CPU cores, a [Platinum Tier License](../../licensing.md#license-tiers)
|
||||
(\$3000 per month) will be required, and for organizations that exceed 3000 CPU cores, a custom [Diamond Tier License](../../licensing.md#license-tiers)
|
||||
(\$1 per core per month) will be necessary. **All paid license tiers will include priority feature development and SLAs on support with urgent issues.**
|
||||
|
||||
## Have we considered alternatives?
|
||||
|
|
|
|||
|
|
@ -89,6 +89,6 @@ orchard dev
|
|||
This will launch a development cluster with a single worker on your machine. Refer to [Orchard documentation](https://github.com/cirruslabs/orchard#creating-virtual-machines)
|
||||
on how to create your first virtual machine and access it.
|
||||
|
||||
In a [separate blog post](/blog/2023/04/28/ssh-over-grpc-or-how-orchard-simplifies-accessing-vms-in-private-networks/)
|
||||
In a [separate blog post](2023-04-28-orchard-ssh-over-grpc.md)
|
||||
we’ll cover how Orchard implements seamless SSH access over a gRPC connection. Stay tuned and please don’t hesitate to
|
||||
[reach out](https://github.com/cirruslabs/orchard/discussions/landing)!
|
||||
|
|
|
|||
|
|
@ -64,7 +64,7 @@ We’ve also initially considered using [Yamux](https://github.com/hashicorp/yam
|
|||
|
||||
First of all, we’ve made the new port-forwarding functionality available for integrations via the Orchard’s REST API:
|
||||
|
||||

|
||||

|
||||
|
||||
All you need is to use a WebSocket client when accessing this endpoint to make it work.
|
||||
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ allocate time to continue improving Tart which brings us to the section below.
|
|||
In the last 7 months we've had 12 feature releases that brought a lot of features requested by the community. Here are just
|
||||
a few of them to highlight:
|
||||
|
||||
-[Custom GitLab Runner Executor](/integrations/gitlab-runner/).
|
||||
-[Custom GitLab Runner Executor](../../integrations/gitlab-runner.md).
|
||||
-[Cluster Management via Orchard](2023-04-25-orchard-ga.md).
|
||||
-Numerous compatibility improvements for all kinds of OCI-registries.
|
||||
-Sonoma Support (see details [below](#macos-sonoma-updates)).
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ with preconfigured Tart installation that is optimized to work within AWS infras
|
|||
EC2 Mac Instances is a gem of engineering powered by AWS Nitro devices. Just imagine there is a physical Mac Mini with
|
||||
a plugged in Nitro device that can push the physical power button!
|
||||
|
||||

|
||||

|
||||
|
||||
This clever synergy between Apple Hardware and Nitro System allows seamless integration with VPC networking and booting macOS from an EBS volume.
|
||||
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ than recently announced Apple Silicon GitHub-manged runners that cost $0.16 per
|
|||
Now lets take a look at the new Cirrus Runners dashboard of a real customers that run their workflows on Cirrus Runners
|
||||
and **practically pushing the price performance pretty close to the theoretical minimum**.
|
||||
|
||||

|
||||

|
||||
|
||||
As you can see above Cirrus Runners Dashboard focuses on 4 core metrics:
|
||||
|
||||
|
|
@ -50,7 +50,7 @@ we can see that the downside of such great price performance is that jobs are wa
|
|||
Here is another example of Cirrus Runners Dashboard for a different customer that has a slightly higher price performance of $0.017 per minute
|
||||
but at the same time doesn't experience queue time at all. **Note that $0.017 is still 10 times cheaper than GitHub-managed Apple Silicon runners**.
|
||||
|
||||

|
||||

|
||||
|
||||
## Conclusion
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,44 @@
|
|||
---
|
||||
draft: false
|
||||
date: 2025-10-27
|
||||
search:
|
||||
exclude: true
|
||||
authors:
|
||||
- fkorotkov
|
||||
categories:
|
||||
- announcement
|
||||
---
|
||||
|
||||
# Press Release: Cirrus Labs Successfully Enforces Its Fair Source License
|
||||
|
||||
**New York City, NY – October 27th, 2025 – Cirrus Labs, Inc.**, a leading provider of platforms for digital transformation, today announced that it has reached a settlement agreement regarding a violation of its Fair Source License.
|
||||
|
||||
<!-- more -->
|
||||
|
||||
Cirrus Labs makes its Tart Virtualization Toolset, a leading virtualization toolset to build, run and manage macOS and Linux virtual machines (VMs) on Apple Silicon,
|
||||
freely available on GitHub under the Fair Source License, a source-available license. Tart is used by tens of thousands of engineers at no charge within its generous free‑use limits.
|
||||
Many large enterprises that need to exceed those limits support continued development through paid licenses. Cirrus Labs also uses Tart to power [Cirrus Runners](https://cirrus-runners.app/)
|
||||
— a drop‑in replacement for macOS and Linux runners for GitHub Actions — offered at a fixed monthly price for unlimited usage.
|
||||
|
||||
Cirrus Labs discovered that, **despite a prior licensing request that was declined due to a conflict of interest**, another company used Tart in a manner that exceeded the license’s free‑use limits,
|
||||
in order to create a competing product.
|
||||
|
||||
After several months of negotiations, the matter was settled and a settlement payment to Cirrus Labs was agreed upon.
|
||||
|
||||
!!! quote "Comment by Fedor Korotkov, CEO of Cirrus Labs"
|
||||
|
||||
As a company we embrace healthy competition that ultimately benefits the end user. Most of our users have no trouble complying with our license,
|
||||
and even when they need something more than our free use limits, we can almost always grant them a license that fits their needs. **This was an exceptional case.**
|
||||
We are pleased to have reached this settlement, which validates our source-available licensing strategy and reinforces our commitment to protecting our company and serving our community.
|
||||
|
||||
Cirrus Labs was represented in this matter by [Jordan Raphael](https://byronraphael.com/attorneys/jordan-raphael/) of Byron Raphael LLP, a boutique intellectual property law firm,
|
||||
and [Heather Meeker](https://www.techlawpartners.com/heather), a well-known specialist in open source and source available licensing.
|
||||
|
||||
The specific financial terms of the settlement and the identity of the counterparty remain confidential.
|
||||
|
||||
**About Cirrus Labs:** Cirrus Labs, Inc. is a bootstrapped developer-infrastructure company founded in 2017. Our offerings among others include Tart and Cirrus Runners,
|
||||
and our software is used by teams at category-leading companies including Atlassian, Figma, Zendesk, Sentry and many more.
|
||||
|
||||
Learn more at [https://tart.run/](https://tart.run/) and [https://cirrus-runners.app/](https://cirrus-runners.app/).
|
||||
|
||||
**Contact:** [hello@cirruslabs.org](mailto:hello@cirruslabs.org)
|
||||
48
docs/faq.md
48
docs/faq.md
|
|
@ -5,6 +5,42 @@ title: Frequently Asked Questions
|
|||
description: Advanced configuration and troubleshooting tips.
|
||||
---
|
||||
|
||||
## Headless machines
|
||||
|
||||
Starting from macOS 15 (Sequoia), there's an undocumented requirement from [Virtualization.Framework](https://developer.apple.com/documentation/virtualization) (which Tart uses) to have an unlocked `login.keychain` available whenever running a VM.
|
||||
|
||||
Without an existing and unlocked `login.keychain`, the VM won't start with errors like:
|
||||
|
||||
* `SecKeyCreateRandomKey_ios failed`
|
||||
* `Failed to generate keypair`
|
||||
* `Interaction is not allowed with the Security Server`
|
||||
|
||||
Below you'll find a couple of workarounds for this behavior.
|
||||
|
||||
### Log in via GUI at least once
|
||||
|
||||
Connect to the headless machine via [Screen Sharing](https://support.apple.com/guide/mac-help/share-the-screen-of-another-mac-mh14066/mac) and log in to a Mac user account. If you haven't done already, you can enable Screen Sharing [via the terminal](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/connect-to-mac-instance.html#mac-instance-vnc).
|
||||
|
||||
Logging in graphically will automatically create the `login.keychain`. Afterward, you have two options:
|
||||
|
||||
* configure [automatic log in to a Mac user account](https://support.apple.com/en-us/102316)
|
||||
* this will maintain a running user session (GUI) even after the machine reboots
|
||||
* moreover, you can still lock the screen (either manually [or automatically](https://support.apple.com/guide/mac-help/change-lock-screen-settings-on-mac-mh11784/mac)), however, the security benefit of this is questionable
|
||||
* use `security unlock-keychain login.keychain` to unlock the login keychain via the terminal
|
||||
* this command also supports the `-p` command-line argument, which allows you to supply a password and unlock non-interactively
|
||||
|
||||
### Create and unlock the login keychain via the terminal
|
||||
|
||||
Compared to the previous approach, this one is fully automated, but might stop working at some point in the future:
|
||||
|
||||
```shell
|
||||
security create-keychain -p '' login.keychain
|
||||
security unlock-keychain -p '' login.keychain
|
||||
security login-keychain -s login.keychain
|
||||
```
|
||||
|
||||
Note that this will create a `login.keychain` with an empty password. Consider supplying a different value to `-p` or omitting the `-p` to enter the password interactively.
|
||||
|
||||
## Troubleshooting crashes
|
||||
|
||||
If you experience a crash or encounter another error while using the tart executable, you can collect debug information to assist with troubleshooting. Run the following command in a separate terminal window to gather logs from the Tart process and the macOS Virtualization subsystem:
|
||||
|
|
@ -34,7 +70,7 @@ Then from within a virtual machine you can access the service using the router's
|
|||
or by running the following command in the Terminal:
|
||||
|
||||
```shell
|
||||
netstat -nr | grep default | head -n 1 | awk '{print $2}'
|
||||
netstat -nr | awk '/default/{print $2; exit}'
|
||||
```
|
||||
|
||||
Note that accessing the host is only possible with the default NAT network. If you are running your virtual machines with
|
||||
|
|
@ -143,14 +179,14 @@ This is because Tart uses [Keychain](https://en.wikipedia.org/wiki/Keychain_(sof
|
|||
To unlock the Keychain in an SSH session, run the following command, which will ask for your user's password:
|
||||
|
||||
```shell
|
||||
security unlock-keychain
|
||||
security unlock-keychain login.keychain
|
||||
```
|
||||
|
||||
This command also supports the `-p` command-line argument that allows you to supply the password and unlock non-interactively, which is great for scripts.
|
||||
This command also supports the `-p` command-line argument that allows you to supply a password and unlock non-interactively, which is great for scripts.
|
||||
|
||||
If that doesn't work for you for some reason, you can pass the credentials via the environment variables, see [Registry Authorization](integrations/vm-management.md#registry-authorization) for more details on how to do that.
|
||||
Alternatively, you can pass the credentials via the environment variables, see [Registry Authorization](integrations/vm-management.md#registry-authorization) for more details on how to do that.
|
||||
|
||||
## How Tart is different from Anka?
|
||||
## How is Tart different from Anka?
|
||||
|
||||
Under the hood Tart is using the same technology as Anka 3.0 so there should be no real difference in performance
|
||||
or features supported. If there is some feature missing please don't hesitate to [create a feature request](https://github.com/cirruslabs/tart/issues).
|
||||
|
|
@ -166,6 +202,8 @@ Tart does have an analogue of Anka Controller for managing VMs across a cluster
|
|||
|
||||
In case there's not enough space to fit the newly pulled or cloned VM image, Tart will remove the least recently accessed VMs from OCI cache and `.ipsw` files from IPSW cache until enough free space is available.
|
||||
|
||||
The `tart clone` command limits this automatic pruning to 100 GB by default to avoid removing too many cached items. You can change this limit with the `--prune-limit` option (in gigabytes).
|
||||
|
||||
To disable this functionality, set the `TART_NO_AUTO_PRUNE` environment variable either globally:
|
||||
|
||||
```shell
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ description: Run pipeline steps in isolated ephemeral Tart Virtual Machines.
|
|||
|
||||
It is possible to run [Buildkite](https://buildkite.com/) pipeline steps in isolated ephemeral Tart Virtual Machines with the help of [Tart Buildkite Plugin](https://github.com/cirruslabs/tart-buildkite-plugin):
|
||||
|
||||

|
||||

|
||||
|
||||
## Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ brew install cirruslabs/cli/cirrus
|
|||
cirrus run
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
[Cirrus CI](https://cirrus-ci.org/) already leverages Tart to power its macOS cloud infrastructure. The `.cirrus.yml`
|
||||
config from above will just work in Cirrus CI and your tasks will be executed inside Tart VMs in our cloud.
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ Orchard cluster consists of three components:
|
|||
|
||||
* Controller — responsible for managing the cluster and scheduling of resources
|
||||
* Worker — responsible for executing the VMs
|
||||
* Client — responsible for creating, modifying and removing the resources on the Controller, can either be an [Orchard CLI](/orchard/using-orchard-cli) or [an API consumer](/orchard/integration-guide)
|
||||
* Client — responsible for creating, modifying and removing the resources on the Controller, can either be an [Orchard CLI](using-orchard-cli.md) or [an API consumer](integration-guide.md)
|
||||
|
||||
At the moment, only one Controller instance is supported, while you can deploy one or more Workers and run any number of Clients.
|
||||
|
||||
|
|
@ -14,7 +14,7 @@ In terms of networking requirements, only Controller needs to be directly access
|
|||
|
||||
When an Orchard Client or a Worker connects to the Controller, they need to establish trust and verify that they're talking to the right Controller, so that no [man-in-the-middle attack](https://en.wikipedia.org/wiki/Man-in-the-middle_attack) is possible.
|
||||
|
||||
Similarly to web-browsers (that rely on the [public key infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure)) and SSH (which relies on semi-automated fingerprint verification), Orchard combines these two traits in a hybrid approach by defaulting to automatic PKI verification (can be disabled by [`--no-pki`](#--no-pki-override)) and falling-back to a manual verification for self-signed certificates.
|
||||
Similarly to web-browsers (that rely on the [public key infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure)) and SSH (which relies on semi-automated fingerprint verification), Orchard combines these two traits in a hybrid approach by defaulting to automatic PKI verification (can be disabled by [`--no-pki`](#-no-pki-override)) and falling-back to a manual verification for self-signed certificates.
|
||||
|
||||
This hybrid approach is needed because the Controller can be configured in two ways:
|
||||
|
||||
|
|
@ -29,7 +29,7 @@ Below we'll explain how Orchard client and Worker secure the connection when acc
|
|||
|
||||
Client is associated with the Controller using a `orchard context create` command, which works as follows:
|
||||
|
||||
* Client attempts to connect to the Controller and validate its certificate using host's root CA set (can be disabled with [`--no-pki`](#--no-pki-override))
|
||||
* Client attempts to connect to the Controller and validate its certificate using host's root CA set (can be disabled with [`--no-pki`](#-no-pki-override))
|
||||
* if the Client encounters a *Controller with a publicly valid certificate*, that would be the last step and the association would succeed
|
||||
* if the Client is dealing with *Controller with a self-signed certificate*, the Client will do another connection attempt to probe the Controller's certificate
|
||||
* the probed Controller's certificate fingerprint is then presented to the user, and if the user agrees to trust it, the Client then considers that certificate to be trusted for a given context
|
||||
|
|
@ -53,7 +53,7 @@ The way Worker connects to the Controller using the `orchard worker run` command
|
|||
* when the Bootstrap Token contains the Controller's certificate:
|
||||
* the Orchard Worker will try to connect to the Controller with a trusted CA set containing only that certificate
|
||||
* when the Bootstrap Token has no Controller's certificate:
|
||||
* the Orchard Worker will try the PKI approach (can be disabled with [`--no-pki`](#--no-pki-override) to effectively prevent the Worker from connecting) and fail if certificate verification using PKI is not possible
|
||||
* the Orchard Worker will try the PKI approach (can be disabled with [`--no-pki`](#-no-pki-override) to effectively prevent the Worker from connecting) and fail if certificate verification using PKI is not possible
|
||||
|
||||
### `--no-pki` override
|
||||
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ Here are other command-line arguments associated with this functionality:
|
|||
* `--insecure-ssh-no-client-auth` — allow SSH clients to connect to the controller's SSH server without authentication, thus only authenticating on the target worker/VM's SSH server
|
||||
* useful when you already have strong credentials on your VMs, and you want to share these VMs to others without additionally giving out Orchard Cluster credentials
|
||||
|
||||
Check out our [Jumping through the hoops: SSH jump host functionality in Orchard](/blog/2024/06/20/jumping-through-the-hoops-ssh-jump-host-functionality-in-orchard/) blog post for more information.
|
||||
Check out our [Jumping through the hoops: SSH jump host functionality in Orchard](../blog/posts/2024-06-20-jumping-through-the-hoops.md) blog post for more information.
|
||||
|
||||
## Deployment Methods
|
||||
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ Orchard has a REST API that follows [OpenAPI specification](https://swagger.io/s
|
|||
|
||||
You can run `orchard dev` locally and navigate to `http://127.0.0.1:6120/v1/` for interactive documentation.
|
||||
|
||||

|
||||

|
||||
|
||||
## Using the API
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ a couple of VMs is not enough anymore for your needs? This is where [Orchard](ht
|
|||
comes into play!
|
||||
|
||||
It allows you to orchestrate multiple Tart-capable hosts from either an Orchard CLI (which we demonstrate below)
|
||||
or [through the API](/orchard/integration-guide).
|
||||
or [through the API](integration-guide.md).
|
||||
|
||||
The easiest way to start is to run Orchard in local development mode:
|
||||
|
||||
|
|
@ -18,7 +18,7 @@ test both the CLI functionality and the API from a tool like cURL or programming
|
|||
authenticate requests.
|
||||
|
||||
Note that in production deployments, these two components are started separately and enable security by default. Please
|
||||
refer to [Deploying Controller](/orchard/deploying-controller) and [Deploying Workers](/orchard/deploying-workers) for
|
||||
refer to [Deploying Controller](deploying-controller.md) and [Deploying Workers](deploying-workers.md) for
|
||||
more information.
|
||||
|
||||
## Creating Virtual Machines
|
||||
|
|
@ -92,10 +92,10 @@ orchard delete vm sequoia-base
|
|||
In addition to controlling the Orchard via the CLI arguments, there are environment variables that may be beneficial
|
||||
both when automating Orchard and in daily use:
|
||||
|
||||
| Variable name | Description |
|
||||
|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `ORCHARD_HOME` | Override Orchard's home directory. Useful when running multiple Orchard instances on the same host and when testing. |
|
||||
| `ORCHARD_LICENSE_TIER` | The default license limit only allows connecting 4 Orchard Workers to the Orchard Controller. If you've purchased a [Gold Tier License](/licensing/), set this variable to `gold` to increase the limit to 20 Orchard Workers. And if you've purchased a [Platinum Tier License](/licensing/), set this variable to `platinum` to increase the limit to 200 Orchard Workers. |
|
||||
| `ORCHARD_URL` | Override controller URL on per-command basis. |
|
||||
| `ORCHARD_SERVICE_ACCOUNT_NAME` | Override service account name (used for controller API auth) on per-command basis. |
|
||||
| `ORCHARD_SERVICE_ACCOUNT_TOKEN` | Override service account token (used for controller API auth) on per-command basis. |
|
||||
| Variable name | Description |
|
||||
|---------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `ORCHARD_HOME` | Override Orchard's home directory. Useful when running multiple Orchard instances on the same host and when testing. |
|
||||
| `ORCHARD_LICENSE_TIER` | The default license limit only allows connecting 4 Orchard Workers to the Orchard Controller. If you've purchased a [Gold Tier License](../licensing.md), set this variable to `gold` to increase the limit to 20 Orchard Workers. And if you've purchased a [Platinum Tier License](../licensing.md), set this variable to `platinum` to increase the limit to 200 Orchard Workers. |
|
||||
| `ORCHARD_URL` | Override controller URL on per-command basis. |
|
||||
| `ORCHARD_SERVICE_ACCOUNT_NAME` | Override service account name (used for controller API auth) on per-command basis. |
|
||||
| `ORCHARD_SERVICE_ACCOUNT_TOKEN` | Override service account token (used for controller API auth) on per-command basis. |
|
||||
|
|
|
|||
|
|
@ -75,3 +75,12 @@ orchard create vm --resources bandwidth-mbps=7500 <NAME>
|
|||
However, after this VM is scheduled, the 10 Gbps Mac Studio will only be able to accommodate one more VM (due to internal Apple EULA limit for macOS virtualization) with `bandwidth-mbps=2500` or less.
|
||||
|
||||
After the VM finishes, the unused resources will be available again.
|
||||
|
||||
## Automatic resources
|
||||
|
||||
In addition to manually specifying resources when starting a worker, the following resources are discovered and set automatically by the worker for convenience:
|
||||
|
||||
* `org.cirruslabs.logical-cores` — number of logical cores on the host
|
||||
* `org.cirruslabs.memory-mib` — total memory in MiB (mebibytes) on the host
|
||||
|
||||
Note that the values for these resources are scraped only once at worker startup.
|
||||
|
|
|
|||
|
|
@ -95,8 +95,8 @@ ssh admin@$(tart ip sequoia-base)
|
|||
|
||||
```bash
|
||||
brew install cirruslabs/cli/sshpass
|
||||
sshpass -p admin ssh -o "StrictHostKeyChecking no" admin@$(tart ip sequoia-base) "uname -a"
|
||||
sshpass -p admin ssh -o "StrictHostKeyChecking no" admin@$(tart ip sequoia-base) < script.sh
|
||||
sshpass -p admin ssh -o "StrictHostKeyChecking no" -o "UserKnownHostsFile=/dev/null" admin@$(tart ip sequoia-base) "uname -a"
|
||||
sshpass -p admin ssh -o "StrictHostKeyChecking no" -o "UserKnownHostsFile=/dev/null" admin@$(tart ip sequoia-base) < script.sh
|
||||
```
|
||||
|
||||
## Mounting directories
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
pytest
|
||||
testcontainers
|
||||
requests == 2.31.0 # work around https://github.com/psf/requests/issues/6707
|
||||
requests
|
||||
bitmath
|
||||
pytest-dependency
|
||||
paramiko
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ def test_run(tart, run_opts):
|
|||
vm_name = f"integration-test-run-{uuid.uuid4()}"
|
||||
|
||||
# Instantiate a VM with admin:admin SSH access
|
||||
tart.run(["clone", "ghcr.io/cirruslabs/macos-sonoma-base:latest", vm_name])
|
||||
tart.run(["clone", "ghcr.io/cirruslabs/macos-tahoe-base:latest", vm_name])
|
||||
|
||||
# Run the VM asynchronously
|
||||
tart_run_process = tart.run_async(["run", vm_name] + run_opts)
|
||||
|
|
|
|||
Loading…
Reference in New Issue