threefold.tfrobot #
TFRobot
Wrapper around the TFGrid mass deployer tfrobot.
fn config_get #
fn config_get(configname string) !DeployConfig
fn configure #
fn configure(instance string, config_ Config) !TFRobot[Config]
fn get #
fn get(instance string) !TFRobot[Config]
fn sshagent_keys_add #
fn sshagent_keys_add(mut config DeployConfig) !
get all keys from the ssh-agent and add them to the config
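For example, a minimal sketch of loading agent keys into a deploy config before deploying (the import path is inferred from the module name and may differ in your setup):

```v
import threefold.tfrobot

fn keys_example(mut cfg tfrobot.DeployConfig) ! {
	// add every key currently loaded in the local ssh-agent to cfg.ssh_keys
	tfrobot.sshagent_keys_add(mut cfg)!
	println(cfg.ssh_keys.keys())
}
```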
fn vm_config_get #
fn vm_config_get(configname string, name string) !VMConfig
fn vm_get #
fn vm_get(configname string, name string) !VMOutput
fn vms_get #
fn vms_get(configname string) ![]VMOutput
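A sketch of reading a deployment back by its config name using the getters above (the 'mydeployment' and 'vm1' names are placeholders; the import path is assumed from the module name):

```v
import threefold.tfrobot

fn inspect_example() ! {
	// the stored deploy configuration
	cfg := tfrobot.config_get('mydeployment')!
	println('config has ${cfg.vms.len} vm definitions')

	// every VM tfrobot reported for that deployment
	vms := tfrobot.vms_get('mydeployment')!
	for vm in vms {
		println('${vm.name}: ygg=${vm.yggdrasil_ip} mycelium=${vm.mycelium_ip}')
	}

	// a single VM by name
	vm := tfrobot.vm_get('mydeployment', 'vm1')!
	println(vm.public_ip4)
}
```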
fn (TFRobot[Config]) cancel #
fn (mut robot TFRobot[Config]) cancel(mut config CancelConfig) !
fn (TFRobot[Config]) deploy #
fn (mut robot TFRobot[Config]) deploy(config_ DeployConfig) !DeployResult
fn (TFRobot[Config]) job_new #
fn (mut r TFRobot[Config]) job_new(job Job) !Job
fn (TFRobot[Config]) vm_deploy #
fn (mut robot TFRobot[Config]) vm_deploy(args_ VMSpecs) !VMOutput
connect only over Yggdrasil and Mycelium
enum Network #
enum Network {
main
dev
qa
test
}
struct CancelConfig #
struct CancelConfig {
mut:
name string @[required]
mnemonic string @[required]
network Network @[required]
node_groups []CancelGroup @[required]
}
struct CancelGroup #
struct CancelGroup {
name string @[required]
}
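A hedged sketch of cancelling the node groups of an earlier deployment through a previously configured instance (instance, deployment, and group names as well as the mnemonic are placeholders):

```v
import threefold.tfrobot

fn cancel_example() ! {
	mut robot := tfrobot.get('myrobot')!
	mut cfg := tfrobot.CancelConfig{
		name: 'mydeployment'
		mnemonic: 'word1 word2 ...'
		network: .main
		node_groups: [tfrobot.CancelGroup{
			name: 'group_a'
		}]
	}
	robot.cancel(mut cfg)!
}
```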
struct Config #
struct Config {
pub mut:
configtype string = 'tfrobot' // needs to be defined
mnemonics string
network string = 'main'
}
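A minimal sketch of registering a named instance with configure and retrieving it later with get (the mnemonic is a placeholder; the import path is assumed from the module name):

```v
import threefold.tfrobot

fn configure_example() ! {
	robot := tfrobot.configure('myrobot', tfrobot.Config{
		mnemonics: 'word1 word2 ...'
		network: 'main'
	})!
	println(robot.jobs.len)

	// later, the same instance can be fetched by name
	robot2 := tfrobot.get('myrobot')!
	println(robot2.jobs.len)
}
```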
struct DeployConfig #
struct DeployConfig {
pub mut:
name string
mnemonic string
network Network = .main
node_groups []NodeGroup @[required]
vms []VMConfig @[required]
ssh_keys map[string]string
debug bool
}
struct DeployResult #
struct DeployResult {
pub:
ok map[string][]VMOutput
error map[string]string
}
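Putting these together, a sketch of a full deployment: build a DeployConfig (NodeGroup and VMConfig are documented below), send it through deploy on a configured instance, then walk the per-group results. The flist URL, sizes, and names are placeholders, and the mnemonic/network are assumed to be taken from the configured instance:

```v
import threefold.tfrobot

fn deploy_example() ! {
	mut robot := tfrobot.get('myrobot')!

	mut cfg := tfrobot.DeployConfig{
		name: 'mydeployment'
		node_groups: [
			tfrobot.NodeGroup{
				name: 'group_a'
				nodes_count: 2
				free_cpu: 4
				free_mru: 8 // GB
			},
		]
		vms: [
			tfrobot.VMConfig{
				name: 'vm'
				vms_count: 2
				node_group: 'group_a'
				cpu: 4
				mem: 8 // GB
				flist: 'https://hub.grid.tf/<user>/<image>.flist'
				entry_point: '/sbin/zinit init'
			},
		]
	}
	tfrobot.sshagent_keys_add(mut cfg)! // or fill cfg.ssh_keys manually

	result := robot.deploy(cfg)!
	for group, vms in result.ok {
		println('${group}: ${vms.len} vms deployed')
	}
	for group, msg in result.error {
		eprintln('${group} failed: ${msg}')
	}
}
```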
struct Deployment #
struct Deployment {
pub:
config VMConfig
quantity int
}
Deployment is an instruction to deploy a quantity of VMs with a given configuration
struct Job #
struct Job {
pub:
name string
network Network
mneumonic string @[required]
pub mut:
ssh_keys map[string]string
deployments []Deployment
vms map[string]VirtualMachine
}
fn (Job) deploy_vms #
fn (mut j Job) deploy_vms(config VMConfig, quantity int)
fn (Job) vm_get #
fn (j Job) vm_get(name string) ?VirtualMachine
fn (Job) add_ssh_key #
fn (mut j Job) add_ssh_key(name string, key string)
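A sketch of the job-based flow: register a Job on a TFRobot instance, attach an SSH key, queue VM deployments, and look a VM up afterwards (names, key, and mnemonic are placeholders; note the field is spelled mneumonic in the struct above):

```v
import threefold.tfrobot

fn job_example() ! {
	mut robot := tfrobot.get('myrobot')!

	mut job := robot.job_new(tfrobot.Job{
		name: 'myjob'
		network: .main
		mneumonic: 'word1 word2 ...'
	})!
	job.add_ssh_key('mykey', 'ssh-ed25519 AAAA... user@host')

	// queue three VMs sharing the same configuration
	job.deploy_vms(tfrobot.VMConfig{
		name: 'worker'
		cpu: 2
		mem: 4
		flist: 'https://hub.grid.tf/<user>/<image>.flist'
		entry_point: '/sbin/zinit init'
	}, 3)

	if vm := job.vm_get('worker') {
		println(vm.ip4)
	}
}
```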
struct Mount #
struct Mount {
pub:
disk_name string
mount_point string
}
struct NodeArgs #
struct NodeArgs {
pub mut:
ip4 bool = true
ip6 bool = true
planetary bool = true
mycelium bool = true
timeout int = 120 // timeout in sec
}
fn (VMOutput) ssh_interactive #
fn (vm VMOutput) ssh_interactive(key_path string) !
open an interactive ssh session to the VM using the given key: the public IPv4 address is used if set, otherwise the Yggdrasil IP; an error is returned if neither is available
struct NodeGroup #
struct NodeGroup {
name string
nodes_count int @[required]
free_cpu int @[required] // number of logical cores
free_mru int @[required] // amount of memory in GB
free_ssd int // amount of ssd storage in GB
free_hdd int // amount of hdd storage in GB
dedicated bool // are nodes dedicated
public_ip4 bool
public_ip6 bool
certified bool // should the nodes be certified (if false, the nodes can be either certified or DIY)
region string // region can be the name of the continent the nodes are located in (africa, americas, antarctic, antarctic ocean, asia, europe, oceania, polar)
}
struct TFRobot #
struct TFRobot[T] {
base.BaseConfig[T]
pub mut:
jobs map[string]Job
}
struct VMConfig #
struct VMConfig {
pub mut:
name string @[required]
vms_count int = 1 @[required]
node_group string
cpu int = 4 @[required]
mem int = 4 @[required] // in GB
public_ip4 bool = false
public_ip6 bool = false
ygg_ip bool = true
mycelium_ip bool = true
flist string @[required]
entry_point string @[required]
root_size int = 20
ssh_key string
env_vars map[string]string
}
struct VMOutput #
struct VMOutput {
pub mut:
name string @[json: 'Name'; required]
network_name string @[json: 'NetworkName'; required]
node_group string
deployment_name string
public_ip4 string @[json: 'PublicIP4'; required]
public_ip6 string @[json: 'PublicIP6'; required]
yggdrasil_ip string @[json: 'YggIP'; required]
mycelium_ip string @[json: 'MyceliumIP'; required]
ip string @[json: 'IP'; required]
mounts []Mount @[json: 'Mounts'; required]
node_id u32 @[json: 'NodeID']
contract_id u64 @[json: 'ContractID']
}
fn (VMOutput) node #
fn (vm VMOutput) node(args NodeArgs) !&builder.Node
return an ssh node (can be used to perform actions remotely); all available channels are checked until one allows ssh into the node
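For instance, a sketch that fetches a node over Yggdrasil/Mycelium only and runs a command interactively (exact builder.Node capabilities depend on the builder module):

```v
import threefold.tfrobot

fn node_example(vm tfrobot.VMOutput) ! {
	// skip ip4/ip6 and rely on the overlay networks, with a shorter timeout
	node := vm.node(ip4: false, ip6: false, timeout: 60)!
	node.exec_interactive('ls /')!
}
```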
fn (VMOutput) tcpport_addr_get #
fn (vm VMOutput) tcpport_addr_get(port int) !string
fn (VMOutput) vscode #
fn (vm VMOutput) vscode() !
fn (VMOutput) tasks_see #
fn (vm VMOutput) tasks_see(dag &dagu.DAG) !
open the Dagu web UI page for the given DAG in the local browser, using the VM's dagu address on port 8081
fn (VMOutput) vscode_holochain #
fn (vm VMOutput) vscode_holochain() !
fn (VMOutput) vscode_holochain_proxy #
fn (vm VMOutput) vscode_holochain_proxy() !
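A small usage sketch of the VMOutput access helpers documented above (key path and port are placeholders):

```v
import threefold.tfrobot

fn access_example(vm tfrobot.VMOutput) ! {
	// address for reaching a tcp service on the vm
	addr := vm.tcpport_addr_get(8080)!
	println(addr)

	// interactive ssh session: public ipv4 if set, otherwise the yggdrasil ip
	vm.ssh_interactive('/home/user/.ssh/id_ed25519')!
}
```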
struct VirtualMachine #
struct VirtualMachine {
name string
ip4 string
ip6 string
yggip string
ip string
// mounts []string
}
VirtualMachine represents the VM info output by tfrobot
- README
- fn config_get
- fn configure
- fn get
- fn sshagent_keys_add
- fn vm_config_get
- fn vm_get
- fn vms_get
- type TFRobot[Config]
- enum Network
- struct CancelConfig
- struct CancelGroup
- struct Config
- struct DeployConfig
- struct DeployResult
- struct Deployment
- struct Job
- struct Mount
- struct NodeArgs
- struct NodeGroup
- struct TFRobot
- struct VMConfig
- struct VMOutput
- struct VirtualMachine