Skip to content
Permalink

Comparing changes

Choose two branches to see what’s changed or to start a new pull request. If you need to, you can also compare across forks or learn more about diff comparisons.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks. Learn more about diff comparisons here.
base repository: NixOS/nixpkgs
Failed to load repositories. Confirm that selected base ref is valid, then try again.
Loading
base: 412ad9447a86
Choose a base ref
...
head repository: NixOS/nixpkgs
Failed to load repositories. Confirm that selected head ref is valid, then try again.
Loading
compare: fcf3458356ee
Choose a head ref
  • 3 commits
  • 5 files changed
  • 2 contributors

Commits on Jan 13, 2018

  1. nixos/xautolock: rewrite and add some options

    WilliButz authored and Samuel Leathers committed Jan 13, 2018
    Copy the full SHA
    1e0eebf View commit details
  2. nixops: digital ocean PR #765

    Samuel Leathers committed Jan 13, 2018
    Copy the full SHA
    eb64a95 View commit details
  3. nixops: enable vultr

    Samuel Leathers committed Jan 13, 2018

    Verified

    This commit was created on GitHub.com and signed with GitHub’s verified signature. The key has expired.
    Copy the full SHA
    fcf3458 View commit details
92 changes: 78 additions & 14 deletions nixos/modules/services/x11/xautolock.nix
Original file line number Diff line number Diff line change
@@ -31,7 +31,17 @@ in
type = types.string;

description = ''
The script to use when locking the computer.
The script to use when automatically locking the computer.
'';
};

nowlocker = mkOption {
default = null;
example = "i3lock -i /path/to/img";
type = types.nullOr types.string;

description = ''
The script to use when manually locking the computer with <command>xautolock -locknow</command>.
'';
};

@@ -45,28 +55,82 @@ in
};

notifier = mkOption {
default = "notify-send 'Locking in 10 seconds'";
type = types.string;
default = null;
example = literalExample ''
"${pkgs.libnotify}/bin/notify-send \"Locking in 10 seconds\""
'';
type = types.nullOr types.string;

description = ''
Notification script to be used to warn about the pending autolock.
'';
};

killer = mkOption {
default = null; # default according to `man xautolock` is none
example = "systemctl suspend";
type = types.nullOr types.string;

description = ''
The script to use when nothing has happened for as long as <option>killtime</option>
'';
};

killtime = mkOption {
default = 20; # default according to `man xautolock`
type = types.int;

description = ''
Minutes xautolock waits until it executes the script specified in <option>killer</option>
(Has to be at least 10 minutes)
'';
};

extraOptions = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "-detectsleep" ];
description = ''
Additional command-line arguments to pass to
<command>xautolock</command>.
'';
};
};
};

config = mkIf cfg.enable {
environment.systemPackages = with pkgs; [ xautolock ];

services.xserver.displayManager.sessionCommands = with builtins; with pkgs; ''
${xautolock}/bin/xautolock \
${concatStringsSep " \\\n" ([
"-time ${toString(cfg.time)}"
"-locker ${cfg.locker}"
] ++ optional cfg.enableNotifier (concatStringsSep " " [
"-notify ${toString(cfg.notify)}"
"-notifier \"${cfg.notifier}\""
]))} &
'';
systemd.user.services.xautolock = {
description = "xautolock service";
wantedBy = [ "graphical-session.target" ];
partOf = [ "graphical-session.target" ];
serviceConfig = with lib; {
ExecStart = strings.concatStringsSep " " ([
"${pkgs.xautolock}/bin/xautolock"
"-noclose"
"-time ${toString cfg.time}"
"-locker '${cfg.locker}'"
] ++ optionals cfg.enableNotifier [
"-notify ${toString cfg.notify}"
"-notifier '${cfg.notifier}'"
] ++ optionals (cfg.nowlocker != null) [
"-nowlocker '${cfg.nowlocker}'"
] ++ optionals (cfg.killer != null) [
"-killer '${cfg.killer}'"
"-killtime ${toString cfg.killtime}"
] ++ cfg.extraOptions);
Restart = "always";
};
};
assertions = [
{
assertion = cfg.enableNotifier -> cfg.notifier != null;
message = "When enabling the notifier for xautolock, you also need to specify the notify script";
}
{
assertion = cfg.killer != null -> cfg.killtime >= 10;
message = "killtime has to be at least 10 minutes according to `man xautolock`";
}
];
};
}
379 changes: 379 additions & 0 deletions pkgs/tools/package-management/nixops/634.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,379 @@
From 9cfa1565b3c6e3e9f8000d245bf7f421a347c3d0 Mon Sep 17 00:00:00 2001
From: Samuel Leathers <sam@appliedtrust.com>
Date: Wed, 22 Mar 2017 08:39:18 -0400
Subject: [PATCH 1/2] adding vultr backend

---
nix/eval-machine-info.nix | 1 +
nix/options.nix | 1 +
nix/vultr.nix | 54 +++++++++++++++
nixops/backends/vultr_vm.py | 161 ++++++++++++++++++++++++++++++++++++++++++++
release.nix | 1 +
5 files changed, 218 insertions(+)
create mode 100644 nix/vultr.nix
create mode 100644 nixops/backends/vultr_vm.py

diff --git a/nix/eval-machine-info.nix b/nix/eval-machine-info.nix
index 1d9f5099..f258b989 100644
--- a/nix/eval-machine-info.nix
+++ b/nix/eval-machine-info.nix
@@ -289,6 +289,7 @@ rec {
hetzner = optionalAttrs (v.config.deployment.targetEnv == "hetzner") v.config.deployment.hetzner;
container = optionalAttrs (v.config.deployment.targetEnv == "container") v.config.deployment.container;
route53 = v.config.deployment.route53;
+ vultr = optionalAttrs (v.config.deployment.targetEnv == "vultr") v.config.deployment.vultr;
virtualbox =
let cfg = v.config.deployment.virtualbox; in
optionalAttrs (v.config.deployment.targetEnv == "virtualbox") (cfg
diff --git a/nix/options.nix b/nix/options.nix
index 0866c3ab..14a3e8bb 100644
--- a/nix/options.nix
+++ b/nix/options.nix
@@ -24,6 +24,7 @@ in
./hetzner.nix
./container.nix
./libvirtd.nix
+ ./vultr.nix
];


diff --git a/nix/vultr.nix b/nix/vultr.nix
new file mode 100644
index 00000000..133227f1
--- /dev/null
+++ b/nix/vultr.nix
@@ -0,0 +1,54 @@
+{ config, pkgs, lib, utils, ... }:
+
+with utils;
+with lib;
+with import ./lib.nix lib;
+
+let
+ cfg = config.deployment.vultr;
+in
+{
+ ###### interface
+ options = {
+
+ deployment.vultr.label = mkOption {
+ default = "";
+ example = "myserver.example.com";
+ type = types.str;
+ description = ''
+ The name of the server.
+ '';
+ };
+
+ deployment.vultr.dcid = mkOption {
+ default = "";
+ example = "1";
+ type = types.str;
+ description = ''
+ The region. See region_list API for list of regions available
+ '';
+ };
+
+ deployment.vultr.vpsplanid = mkOption {
+ example = "201";
+ type = types.str;
+ description = ''
+ The VPSPLANID. Make sure the region you chose supports the plan ID.
+ This determines the resources and cost of the instance.
+ '';
+ };
+ deployment.vultr.snapshotid = mkOption {
+ example = "9e758d1a379eb";
+ type = types.str;
+ description = ''
+ The snapshotid. This needs to be created by following this tutorial:
+ https://www.vultr.com/docs/install-nixos-on-vultr
+ '';
+ };
+ };
+
+ config = mkIf (config.deployment.targetEnv == "vultr") {
+ nixpkgs.system = mkOverride 900 "x86_64-linux";
+ services.openssh.enable = true;
+ };
+}
diff --git a/nixops/backends/vultr_vm.py b/nixops/backends/vultr_vm.py
new file mode 100644
index 00000000..43d53f04
--- /dev/null
+++ b/nixops/backends/vultr_vm.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+"""
+A backend for Vultr.
+
+Vultr doesn't have an official nixos image. To use this backend you must
+follow the instructions here to generate a snapshot:
+ https://www.vultr.com/docs/install-nixos-on-vultr
+
+Still to do:
+* Use nixos OS type when Vultr adds one.
+"""
+import os
+import os.path
+import time
+import nixops.resources
+from nixops.backends import MachineDefinition, MachineState
+from nixops.nix_expr import Function, RawValue
+import nixops.util
+import nixops.known_hosts
+import socket
+from vultr import Vultr, VultrError
+from json import dumps
+
+class VultrDefinition(MachineDefinition):
+ @classmethod
+ def get_type(cls):
+ return "vultr"
+
+ def __init__(self, xml, config):
+ MachineDefinition.__init__(self, xml, config)
+ self.dcid = config["vultr"]["dcid"]
+ self.vpsplanid = config["vultr"]["vpsplanid"]
+ self.snapshotid = config["vultr"]["snapshotid"]
+ self.label = config["vultr"]["label"]
+ # TODO: only use 164 if snapshotid is set.
+ self.osid = 164
+
+ def show_type(self):
+ return "{0} [{1}]".format(self.get_type(), self.dcid)
+
+
+class VultrState(MachineState):
+ @classmethod
+ def get_type(cls):
+ return "vultr"
+
+ state = nixops.util.attr_property("state", MachineState.MISSING, int) # override
+ apikey = nixops.util.attr_property("vultr.apikey", None)
+ public_ipv4 = nixops.util.attr_property("publicIpv4", None)
+ default_gateway = nixops.util.attr_property("defaultGateway", None)
+ netmask = nixops.util.attr_property("netmask", None)
+ subid = nixops.util.attr_property("vultr.subid", None)
+ label = nixops.util.attr_property("vultr.label", None)
+ _ssh_private_key = nixops.util.attr_property("vultr.sshPrivateKey", None)
+ _ssh_public_key = nixops.util.attr_property("vultr.sshPublicKey", None)
+ _ssh_public_key_deployed = nixops.util.attr_property("vultr.sshPublicKeyDeployed", False, bool)
+ # TODO: only use 164 if snapshotid is set.
+ osid = 164
+
+ def __init__(self, depl, name, id):
+ MachineState.__init__(self, depl, name, id)
+ self.name = name
+
+ def get_ssh_name(self):
+ return self.public_ipv4
+
+ def get_ssh_flags(self, *args, **kwargs):
+ super_state_flags = super(VultrState, self).get_ssh_flags(*args, **kwargs)
+ if self.subid and self._ssh_public_key_deployed:
+ return super_state_flags + [
+ '-o', 'UserKnownHostsFile=/dev/null',
+ '-o', 'StrictHostKeyChecking=no',
+ '-i', self.get_ssh_private_key_file(),
+ ]
+ return super_state_flags
+
+ def get_physical_spec(self):
+ return {
+ ('config', 'boot', 'loader', 'grub', 'device'): '/dev/vda',
+ ('config', 'fileSystems', '/'): { 'device': '/dev/vda1', 'fsType': 'btrfs'},
+ ('config', 'users', 'extraUsers', 'root', 'openssh', 'authorizedKeys', 'keys'): [self._ssh_public_key]
+ }
+
+ def get_ssh_private_key_file(self):
+ if self._ssh_private_key_file:
+ return self._ssh_private_key_file
+ else:
+ return self.write_ssh_private_key(self._ssh_private_key)
+
+ def create_after(self, resources, defn):
+ # make sure the ssh key exists before we do anything else
+ return {
+ r for r in resources if
+ isinstance(r, nixops.resources.ssh_keypair.SSHKeyPairState)
+ }
+
+ def get_api_key(self):
+ apikey = os.environ.get('VULTR_API_KEY', self.apikey)
+ if apikey == None:
+ raise Exception("VULTR_API_KEY must be set in the environment to deploy instances")
+ return apikey
+
+
+ def destroy(self, wipe=False):
+ self.log("destroying instance {}".format(self.subid))
+ vultr = Vultr(self.get_api_key())
+ try:
+ vultr.server_destroy(self.subid)
+ except VultrError:
+ self.log("An error occurred destroying instance. Assuming it's been destroyed already.")
+ self.public_ipv4 = None
+ self.subid = None
+
+ def create(self, defn, check, allow_reboot, allow_recreate):
+ self.set_common_state(defn)
+
+ if self.subid is not None:
+ return
+
+ self.log_start("creating instance ...")
+ self.log("dcid: " + str(defn.dcid))
+ self.log("osid: " + str(defn.osid))
+ self.log("vpsplanid: " + str(defn.vpsplanid))
+ self.log("snapshotid: " + str(defn.snapshotid))
+ self.log("label: " + str(defn.label))
+ vultr = Vultr(self.get_api_key())
+ snapshots = vultr.snapshot_list()
+ if defn.snapshotid not in snapshots:
+ raise Exception("Unexpected Error: snapshot {} does not exist".format(defn.snapshotid))
+ server_create_output = vultr.server_create(dcid=defn.dcid, osid=defn.osid, vpsplanid=defn.vpsplanid, snapshotid=defn.snapshotid, enable_ipv6='yes', enable_private_network='yes', label=defn.label)
+ subid = server_create_output['SUBID']
+ self.log("instance id: " + subid)
+ server_info = vultr.server_list()[subid]
+ while server_info['status'] == 'pending' or server_info['server_state'] != 'ok':
+ server_info = vultr.server_list()[subid]
+ time.sleep(1)
+ self.log_continue("[status: {} state: {}] ".format(server_info['status'], server_info['server_state']))
+ if server_info['status'] == 'active' and server_info['server_state'] == 'ok':
+ # vultr sets ok before locked when restoring snapshot. Need to make sure we're really ready.
+ time.sleep(10)
+ server_info = vultr.server_list()[subid]
+ if server_info['status'] != 'active' or server_info['server_state'] != 'ok':
+ raise Exception("unexpected status: {}/{}".format(server_info['status'],server_info['server_state']))
+ self.subid = subid
+ self.label = server_info['label']
+ self.log_start("generating new SSH keypair... ")
+ key_name = "NixOps client key for {0}".format(self.subid)
+ self._ssh_private_key, self._ssh_public_key = \
+ nixops.util.create_key_pair(key_name=key_name)
+ self.public_ipv4 = server_info['main_ip']
+ self.log_end("{}".format(self.public_ipv4))
+ self.default_gateway = server_info['gateway_v4']
+ self.netmask = server_info['netmask_v4']
+ self.wait_for_ssh()
+
+ def switch_to_configuration(self, method, sync, command=None):
+ res = super(VultrState, self).switch_to_configuration(method, sync, command)
+ if res == 0:
+ self._ssh_public_key_deployed = True
+ return res
+
diff --git a/release.nix b/release.nix
index adab62ad..5149fc94 100644
--- a/release.nix
+++ b/release.nix
@@ -95,6 +95,7 @@ rec {
pysqlite
datadog
digital-ocean
+ vultr
];

# For "nix-build --run-env".

From 22dc35e8f9045337f65e72a31b0833a401611dc0 Mon Sep 17 00:00:00 2001
From: Samuel Leathers <sam@appliedtrust.com>
Date: Thu, 6 Apr 2017 14:58:09 -0400
Subject: [PATCH 2/2] adding documentation and trivial example

---
doc/manual/overview.xml | 39 +++++++++++++++++++++++++++++++++++++++
examples/trivial-vultr.nix | 12 ++++++++++++
nixops/backends/vultr_vm.py | 6 ++++--
3 files changed, 55 insertions(+), 2 deletions(-)
create mode 100644 examples/trivial-vultr.nix

diff --git a/doc/manual/overview.xml b/doc/manual/overview.xml
index 743031ea..49e6da00 100644
--- a/doc/manual/overview.xml
+++ b/doc/manual/overview.xml
@@ -1493,6 +1493,45 @@ xlink:href="https://github.com/elitak/nixos-infect">nixos-infect</link>
. <literal>nixos-infect</literal> itself uses the undocumented
<literal>NIXOS_LUSTRATE</literal> under the hood.
</para>
+</section>
+<section xml:id="sec-deploying-to-vultr"><title>Deploying to Vultr</title>
+
+<para><xref linkend="ex-trivial-vultr.nix" /> shows how to run
+a vultr instance. We only support instance creation and
+destruction at the moment.
+</para>
+
+<para>Note that an image needs to be created in vultr using an iso with your default ssh
+ key for deployment. Follow these
+<link xlink:href="https://www.vultr.com/docs/install-nixos-on-vultr">directions</link>
+to setup an image using nixos. Make sure to assign a public <literal>ssh-key</literal>
+to root in configuration.nix and allow root login with key. This key is only used
+for initial connection and a new key stored in the nixops state will be used for future connections. After booting the instance, create a snapshot in the vultr UI.
+</para>
+
+<example xml:id="ex-trivial-vultr.nix">
+ <title><filename>trivial-vultr.nix</filename>: A trivial vultr setup</title>
+<programlisting>
+{
+ machine = { config, pkgs, ... }: {
+ services.nginx.enable = true;
+ services.openssh.enable = true;
+
+ deployment.targetEnv = "vultr";
+ deployment.vultr.snapshotid = "xxxxxxxxxxxxx";
+ deployment.vultr.dcid = "1";
+ deployment.vultr.vpsplanid = "201";
+ deployment.vultr.label = "dev01.mydomain.com";
+ };
+}
+</programlisting>
+</example>
+
+<para>The snapshotid can be retrieved from the UI or API for the snapshot
+created in the first step. A list of vps plan id's can be retrieved from the
+API call <literal>/v1/plans/list</literal> and the list of dc id's can be
+retrieved from the API call <literal>/v1/regions/list</literal>.</para>
+
</section>

<section><title>Deploying to Libvirtd (Qemu)</title>
diff --git a/examples/trivial-vultr.nix b/examples/trivial-vultr.nix
new file mode 100644
index 00000000..8a5fde87
--- /dev/null
+++ b/examples/trivial-vultr.nix
@@ -0,0 +1,12 @@
+{
+ machine = { config, pkgs, ... }: {
+ services.nginx.enable = true;
+ services.openssh.enable = true;
+
+ deployment.targetEnv = "vultr";
+ deployment.vultr.snapshotid = "xxxxxxxxxxxxx";
+ deployment.vultr.dcid = "1";
+ deployment.vultr.vpsplanid = "201";
+ deployment.vultr.label = "dev01.mydomain.com";
+ };
+}
diff --git a/nixops/backends/vultr_vm.py b/nixops/backends/vultr_vm.py
index 43d53f04..55675fbd 100644
--- a/nixops/backends/vultr_vm.py
+++ b/nixops/backends/vultr_vm.py
@@ -75,11 +75,13 @@ def get_ssh_flags(self, *args, **kwargs):
return super_state_flags

def get_physical_spec(self):
- return {
+ return Function("{ ... }", {
+ 'imports': [ RawValue('<nixpkgs/nixos/modules/profiles/qemu-guest.nix>') ],
+ ('config', 'boot', 'initrd', 'availableKernelModules'): [ "ata_piix", "uhci_hcd", "virtio_pci", "sr_mod", "virtio_blk" ],
('config', 'boot', 'loader', 'grub', 'device'): '/dev/vda',
('config', 'fileSystems', '/'): { 'device': '/dev/vda1', 'fsType': 'btrfs'},
('config', 'users', 'extraUsers', 'root', 'openssh', 'authorizedKeys', 'keys'): [self._ssh_public_key]
- }
+ })

def get_ssh_private_key_file(self):
if self._ssh_private_key_file:
44 changes: 44 additions & 0 deletions pkgs/tools/package-management/nixops/765.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
From 6aa5d6d85cbc94617a52d6b8f14dd6bb8b418b34 Mon Sep 17 00:00:00 2001
From: Samuel Leathers <sam@appliedtrust.com>
Date: Mon, 6 Nov 2017 15:01:55 -0500
Subject: [PATCH] digital ocean: enp0s3 -> ens3

---
nixops/backends/digital_ocean.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/nixops/backends/digital_ocean.py b/nixops/backends/digital_ocean.py
index e883045e..63e47f19 100644
--- a/nixops/backends/digital_ocean.py
+++ b/nixops/backends/digital_ocean.py
@@ -8,7 +8,7 @@

I hit a few subtle problems along the way:
* DO doesn't do dhcp so we have to hard-code the network configuration
-* Ubuntu still uses eth0, 1 etc, not enp0s3 etc so we have a network
+* Ubuntu still uses eth0, 1 etc, not ens3 etc so we have a network
link name change after the reboot.
* I had to modify nixos-infect to reflect the network link name changes,
and to not reboot to avoid ssh-interruption and therefore errors.
@@ -86,10 +86,10 @@ def prefix_len(netmask):
networking = {
'defaultGateway': self.default_gateway,
'nameservers': ['8.8.8.8'], # default provided by DO
- ('interfaces', 'enp0s3', 'ip4'): [{"address": self.public_ipv4, 'prefixLength': prefix_len(self.netmask)}],
+ ('interfaces', 'ens3', 'ip4'): [{"address": self.public_ipv4, 'prefixLength': prefix_len(self.netmask)}],
}
if self.public_ipv6:
- networking[('interfaces', 'enp0s3', 'ip6')] = [{'address': self.public_ipv6['address'], 'prefixLength': self.public_ipv6['prefixLength']}]
+ networking[('interfaces', 'ens3', 'ip6')] = [{'address': self.public_ipv6['address'], 'prefixLength': self.public_ipv6['prefixLength']}]
if self.default_gateway6:
networking['defaultGateway6'] = self.default_gateway6

@@ -188,7 +188,7 @@ def create(self, defn, check, allow_reboot, allow_recreate):

# run modified nixos-infect
# - no reboot
- # - predictable network interface naming (enp0s3 etc)
+ # - predictable network interface naming (ens3 etc)
self.wait_for_ssh()
self.log_start("running nixos-infect")
self.run_command('bash </dev/stdin 2>&1', stdin=open(infect_path))
4 changes: 4 additions & 0 deletions pkgs/tools/package-management/nixops/default.nix
Original file line number Diff line number Diff line change
@@ -6,4 +6,8 @@ callPackage ./generic.nix (rec {
url = "http://nixos.org/releases/nixops/nixops-${version}/nixops-${version}.tar.bz2";
sha256 = "00y2arc5rffvy6xmx4p6ibpjyc61k8dkiabq7ccwwjgckz1d2dpb";
};
patches = [
./634.patch
./765.patch
];
})
5 changes: 3 additions & 2 deletions pkgs/tools/package-management/nixops/generic.nix
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
{ lib, python2Packages, fetchurl, libxslt, docbook5_xsl, openssh
# version args
, src, version
, src, version, patches ? []
}:

python2Packages.buildPythonApplication {
name = "nixops-${version}";
inherit version src;
inherit version src patches;

buildInputs = [ libxslt ];

@@ -25,6 +25,7 @@ python2Packages.buildPythonApplication {
pysqlite
datadog
digital-ocean
vultr
];

doCheck = false;