From ab814a63e26ecccd924cdd247ae53f0e72fb8166 Mon Sep 17 00:00:00 2001 From: Jair Gonzalez Date: Fri, 7 Apr 2017 16:35:45 -0500 Subject: [PATCH 01/21] meta-iotqa: Clean up Bluetooth test classes Remove not used tags and update comments and variable names for added clarity. Signed-off-by: Jair Gonzalez --- .../connectivity/bluetooth/bluetooth.py | 162 +++++++----------- .../connectivity/bluetooth/bt_6lowpan.py | 29 +--- .../bluetooth/bt_6lowpan_mnode.py | 58 ++----- .../connectivity/bluetooth/bt_command.py | 63 +++---- .../bluetooth/bt_command_mnode.py | 127 +++++--------- .../connectivity/bluetooth/bt_stability.py | 50 +++--- 6 files changed, 174 insertions(+), 315 deletions(-) diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bluetooth.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bluetooth.py index 7852fa51dc..7274dd0974 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bluetooth.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bluetooth.py @@ -1,13 +1,11 @@ import time import os -import string from oeqa.utils.helper import shell_cmd_timeout + class BTFunction(object): - """ - @class BTFunction - """ log = "" + def __init__(self, target): self.target = target # un-block software rfkill lock @@ -16,22 +14,14 @@ def __init__(self, target): self.target.run('killall hcitool') def target_collect_info(self, cmd): - """ - @fn target_collect_info - @param self - @param cmd - @return - """ (status, output) = self.target.run(cmd) self.log = self.log + "\n\n[Debug] Command output --- %s: \n" % cmd self.log = self.log + output def target_hciconfig_init(self): - ''' init target bluetooth by hciconfig commands - @fn target_hciconfig_init - @param self - @return - ''' + """ + Init target bluetooth by hciconfig commands + """ (status, output) = self.target.run('hciconfig hci0 reset') assert status == 0, "reset hci0 fails, please check if your BT device exists" time.sleep(1) @@ -41,41 +31,33 @@ def target_hciconfig_init(self): time.sleep(1) def set_leadv(self): - ''' Get hci0 MAC address - @fn get_bt_mac - @param self - @return - ''' + """ + Get hci0 MAC address + """ (status, output) = self.target.run('hciconfig hci0 leadv') time.sleep(2) assert status == 0, "Set leadv fail: %s" % (output) def get_bt_mac(self): - ''' Get hci0 MAC address - @fn get_bt_mac - @param self - @return - ''' + """ + Get hci0 MAC address + """ (status, output) = self.target.run('hciconfig hci0 | grep "BD Address"') return output.split()[2] def get_bt0_ip(self): - ''' Get bt0 (ipv6) address - @fn get_bt0_ip - @param self - @return - ''' + """ + Get bt0 (ipv6) address + """ self.target_collect_info('ifconfig') (status, output) = self.target.run('ifconfig bt0 | grep "inet6 addr"') assert status == 0, "Get bt0 address failure: %s\n%s" % (output, self.log) return output.split('%')[0].split()[2] def get_name(self): - ''' Get bt0 device name by bluetoothctl - @fn get_name - @param self - @return - ''' + """ + Get bt0 device name by bluetoothctl + """ exp = os.path.join(os.path.dirname(__file__), "files/bt_get_name.exp") btmac = self.get_bt_mac() cmd = 'expect %s %s %s' % (exp, self.target.ip, btmac) @@ -91,33 +73,27 @@ def get_name(self): return "" def enable_bluetooth(self): - ''' enable bluetooth after testing - @fn enable_bluetooth - @param self - @return - ''' + """ + Enable bluetooth + """ # Enable Bluetooth (status, output) = self.target.run('connmanctl enable bluetooth') assert status == 0, "Error messages: %s" % output time.sleep(1) def disable_bluetooth(self): - ''' disable 
bluetooth after testing - @fn disable_bluetooth - @param self - @return - ''' + """ + Disable bluetooth + """ (status, output) = self.target.run('connmanctl disable bluetooth') assert status == 0, "Error messages: %s" % output # sleep some seconds to ensure disable is done time.sleep(1) def ctl_power_on(self): - '''bluetoothctl power on bluetooth device - @fn ctl_power_on - @param self - @return - ''' + """ + Use bluetoothctl to power on bluetooth device + """ # start bluetoothctl, then input 'power on' exp = os.path.join(os.path.dirname(__file__), "files/power_on.exp") target_ip = self.target.ip @@ -127,11 +103,9 @@ def ctl_power_on(self): assert status == 2, "power on command fails: %s" % output def ctl_power_off(self): - '''bluetoothctl power off bluetooth device - @fn ctl_power_off - @param self - @return - ''' + """ + Use bluetoothctl to power off bluetooth device + """ # start bluetoothctl, then input 'power off' exp = os.path.join(os.path.dirname(__file__), "files/power_off.exp") target_ip = self.target.ip @@ -140,12 +114,10 @@ def ctl_power_off(self): output = output.decode("ascii") assert status == 2, "power off command fails: %s" % output - def ctl_visable_on(self): - '''bluetoothctl enable visibility - @fn ctl_visable_on - @param self - @return - ''' + def ctl_visible_on(self): + """ + Use bluetoothctl to enable visibility + """ # start bluetoothctl, then input 'discoverable on' exp = os.path.join(os.path.dirname(__file__), "files/discoverable_on.exp") target_ip = self.target.ip @@ -154,12 +126,10 @@ def ctl_visable_on(self): output = output.decode("ascii") assert status == 2, "discoverable on command fails: %s" % output - def ctl_visable_off(self): - '''bluetoothctl disable visibility - @fn ctl_visable_off - @param self - @return - ''' + def ctl_visible_off(self): + """ + Use bluetoothctl to disable visibility + """ # start bluetoothctl, then input 'discoverable off' exp = os.path.join(os.path.dirname(__file__), "files/discoverable_off.exp") target_ip = self.target.ip @@ -169,11 +139,9 @@ def ctl_visable_off(self): assert status == 2, "discoverable off command fails: %s" % output def insert_6lowpan_module(self): - '''Insert BLE 6lowpan module - @fn insert_6lowpan_module - @param self - @return - ''' + """ + Insert BLE 6lowpan module + """ status, output = self.target.run('modprobe bluetooth_6lowpan') assert status == 0, "insert ble 6lowpan module fail: %s" % output # check lsmod, to see if the module is in @@ -185,11 +153,9 @@ def insert_6lowpan_module(self): assert False, "BLE 6lowpan module insert fails. 
%s" % self.log def enable_6lowpan_ble(self): - '''Enable 6lowpan over BLE - @fn enable_6lowpan_ble - @param self - @return - ''' + """ + Enable 6lowpan over BLE + """ self.insert_6lowpan_module() status, output = self.target.run('echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable') assert status == 0, "Enable ble 6lowpan fail: %s" % output @@ -202,11 +168,9 @@ def enable_6lowpan_ble(self): assert False, "BLE 6lowpan interface is: %s\n%s" % (output, self.log) def disable_6lowpan_ble(self): - '''Disable 6lowpan over BLE - @fn disable_6lowpan_ble - @param self - @return - ''' + """ + Disable 6lowpan over BLE + """ status, output = self.target.run('echo 0 > /sys/kernel/debug/bluetooth/6lowpan_enable') assert status == 0, "Disable ble 6lowpan fail: %s" % output # check file number, it should be 1 @@ -218,23 +182,19 @@ def disable_6lowpan_ble(self): pass def bt0_ping6_check(self, ipv6): - ''' On main target, run ping6 to ping second's ipv6 address - @fn bt0_ping6_check - @param self + """ On main target, run ping6 to ping second's ipv6 address + @param ipv6: second target ipv6 address - @return - ''' - cmd='ping6 -I bt0 -c 5 %s' % ipv6 + """ + cmd = 'ping6 -I bt0 -c 5 %s' % ipv6 (status, output) = self.target.run(cmd) assert status == 0, "Ping second target lowpan0 ipv6 address fail: %s" % output def bt0_ssh_check(self, ipv6): - ''' On main target, ssh to second - @fn bt0_ssh_check - @param self + """ On main target, ssh to second + @param ipv6: second target ipv6 address - @return - ''' + """ # ssh root@%bt0 ssh_key = os.path.join(os.path.dirname(__file__), "files/refkit_qa_rsa") self.target.copy_to(ssh_key, "/tmp/") @@ -248,12 +208,10 @@ def bt0_ssh_check(self, ipv6): assert status == 2, "Error messages: %s" % output def connect_6lowpan_ble(self, second): - '''Build 6lowpan connection between taregts[0] and targets[1] over BLE - @fn connect_6lowpan_ble - @param self + """ Build 6lowpan connection between targets[0] and targets[1] over BLE + @param second: second target - @return - ''' + """ self.enable_6lowpan_ble() second.enable_6lowpan_ble() success = 1 @@ -277,13 +235,11 @@ def connect_6lowpan_ble(self, second): assert success == 0, "No bt0 generated: %s\n%s" % (output, self.log) def gatt_basic_check(self, btmac, point): - '''Do basic gatt tool check points. - @fn gatt_basic_check - @param self + """ Do basic gatt tool check points. + @param btmac: remote advertising device BT MAC address @param point: a string for basic checking points. 
- @return - ''' + """ # Local does gatttool commands if point == "connect": exp = os.path.join(os.path.dirname(__file__), "files/gatt_connect.exp") diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py index 7241d49083..65008a0d46 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py @@ -1,36 +1,23 @@ -import os -import time -from oeqa.runtime.bluetooth import bluetooth +from oeqa.runtime.connectivity.bluetooth import bluetooth from oeqa.oetest import oeRuntimeTest from oeqa.utils.helper import shell_cmd_timeout + class CommBT6LowPAN(oeRuntimeTest): - """ - @class CommBT6LowPAN - """ def setUp(self): - """ - @fn setUp - @param self - @return - """ self.bt = bluetooth.BTFunction(self.target) self.bt.target_hciconfig_init() def test_bt_insert_6lowpan_module(self): - '''Insert 6lowpan module - @fn test_bt_insert_6lowpan_module - @param self - @return - ''' + """ + Insert 6lowpan module + """ self.bt.insert_6lowpan_module() def test_bt_enable_6lowpan_ble(self): - '''Enable 6lowpan over BLE - @fn test_bt_enable_6lowpan_ble - @param self - @return - ''' + """ + Enable 6lowpan over BLE + """ self.bt.enable_6lowpan_ble() ## diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan_mnode.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan_mnode.py index 31308cb549..232345b4bf 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan_mnode.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan_mnode.py @@ -1,19 +1,10 @@ -import os -import time -from oeqa.runtime.bluetooth import bluetooth +from oeqa.runtime.connectivity.bluetooth import bluetooth from oeqa.oetest import oeRuntimeTest from oeqa.utils.helper import shell_cmd_timeout + class CommBT6LowPanMNode(oeRuntimeTest): - """ - @class CommBT6LowPanMNode - """ def setUp(self): - """ - @fn setUp - @param self - @return - """ self.bt1 = bluetooth.BTFunction(self.targets[0]) self.bt2 = bluetooth.BTFunction(self.targets[1]) @@ -21,58 +12,43 @@ def setUp(self): self.bt2.target_hciconfig_init() def tearDown(self): - """ - @fn tearDown - @param self - @return - """ self.bt1.disable_6lowpan_ble() self.bt2.disable_6lowpan_ble() def test_bt_connect_6lowpan(self): - '''Setup two devices with BLE - @fn test_bt_connect_6lowpan - @param self - @return - ''' + """ + Setup two devices with BLE + """ self.bt1.connect_6lowpan_ble(self.bt2) def test_bt_6lowpan_ping6_out(self): - '''Setup two devices with BLE, and ping each other - @fn test_bt_6lowpan_ping6_out - @param self - @return - ''' + """ + Setup two devices with BLE, and ping each other + """ self.bt1.connect_6lowpan_ble(self.bt2) # first device to ping second device self.bt1.bt0_ping6_check(self.bt2.get_bt0_ip()) def test_bt_6lowpan_be_pinged(self): - '''Setup two devices with BLE, and ping each other - @fn test_bt_6lowpan_be_pinged - @param self - @return - ''' + """ + Setup two devices with BLE, and ping each other + """ self.bt1.connect_6lowpan_ble(self.bt2) # first device to ping second device self.bt2.bt0_ping6_check(self.bt1.get_bt0_ip()) def test_bt_6lowpan_ssh_to(self): - '''Setup two devices with BLE, and ssh to remote - @fn test_bt_6lowpan_ssh_to - @param self - @return - ''' + """ + Setup two devices with BLE, and ssh to remote + """ self.bt1.connect_6lowpan_ble(self.bt2) # first device to ping second device 
self.bt1.bt0_ssh_check(self.bt2.get_bt0_ip()) def test_bt_6lowpan_be_ssh(self): - '''Setup two devices with BLE, and remote ssh to self - @fn test_bt_6lowpan_be_ssh - @param self - @return - ''' + """ + Setup two devices with BLE, and remote ssh to self + """ self.bt1.connect_6lowpan_ble(self.bt2) # first device to ping second device self.bt2.bt0_ssh_check(self.bt1.get_bt0_ip()) diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py index 9728d631d5..50fa091b11 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py @@ -1,67 +1,50 @@ import os import time import subprocess -from oeqa.runtime.bluetooth import bluetooth +from oeqa.runtime.connectivity.bluetooth import bluetooth from oeqa.oetest import oeRuntimeTest from oeqa.utils.helper import shell_cmd_timeout from oeqa.utils.helper import get_files_dir + class CommBTTest(oeRuntimeTest): - """ - @class CommBTTest - """ def setUp(self): - """ - @fn setUp - @param self - @return - """ self.bt = bluetooth.BTFunction(self.target) self.bt.target_hciconfig_init() def test_bt_power_on(self): - '''enable bluetooth device - @fn test_bt_power_on - @param self - @return - ''' + """ + Enable bluetooth device + """ self.target.run('hciconfig hci0 down') self.bt.ctl_power_on() def test_bt_power_off(self): - '''disable bluetooth device - @fn test_bt_power_off - @param self - @return - ''' + """ + Disable bluetooth device + """ self.target.run('hciconfig hci0 up') self.bt.ctl_power_off() - def test_bt_visable_on(self): - '''enable visibility - @fn test_bt_visable_on - @param self - @return - ''' + def test_bt_visible_on(self): + """ + Enable visibility + """ self.target.run('hciconfig hci0 noscan') - self.bt.ctl_visable_on() + self.bt.ctl_visible_on() - def test_bt_visable_off(self): - '''disable visibility - @fn test_bt_visable_off - @param self - @return - ''' + def test_bt_visible_off(self): + """ + Disable visibility + """ self.target.run('hciconfig hci0 piscan') - self.bt.ctl_visable_off() + self.bt.ctl_visible_off() def test_bt_change_name(self): - '''change BT device name - @fn test_bt_change_name - @param self - @return - ''' - new_name="iot-bt-test" + """ + Change BT device name + """ + new_name = "iot-bt-test" self.target.run('hciconfig hci0 name %s' % new_name) name = self.bt.get_name() if type(name) is bytes: @@ -69,7 +52,7 @@ def test_bt_change_name(self): if name == new_name: pass else: - self.assertEqual(1, 0, msg="Bluetooth set name fails. Current name is: %s" % name) + self.assertEqual(1, 0, msg="Bluetooth set name fails. 
Current name is: %s" % name) ## # @} diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command_mnode.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command_mnode.py index 82115f781c..97ce674042 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command_mnode.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command_mnode.py @@ -1,46 +1,35 @@ import os import time import subprocess -from oeqa.runtime.bluetooth import bluetooth +from oeqa.runtime.connectivity.bluetooth import bluetooth from oeqa.oetest import oeRuntimeTest from oeqa.utils.helper import shell_cmd_timeout from oeqa.utils.helper import get_files_dir + class CommBTTestMNode(oeRuntimeTest): - """ - @class CommBTTestMNode - """ @classmethod def setUpClass(cls): - '''Copy gatttool to /tmp/ folder - @fn setUpClass - @param cls - @return - ''' - bt1=bluetooth.BTFunction(cls.tc.targets[0]) - bt2=bluetooth.BTFunction(cls.tc.targets[1]) + """ + Copy gatttool to /tmp/ folder + """ + bt1 = bluetooth.BTFunction(cls.tc.targets[0]) + bt2 = bluetooth.BTFunction(cls.tc.targets[1]) copy_to_path = os.path.join(get_files_dir(), 'gatttool') cls.tc.targets[0].copy_to(copy_to_path, "/tmp/") bt1.target.run('chmod +x /tmp/gatttool') bt2.target.run('chmod +x /tmp/gatttool') def setUp(self): - """ - @fn setUp - @param self - @return - """ self.bt1 = bluetooth.BTFunction(self.targets[0]) self.bt2 = bluetooth.BTFunction(self.targets[1]) self.bt1.target_hciconfig_init() self.bt2.target_hciconfig_init() def test_bt_gatt_read_primary(self): - '''Use gatttool to show remote primary attr handles - @fn test_bt_gatt_read_primary - @param self - @return - ''' + """ + Use gatttool to show remote primary attr handles + """ for i in range(3): self.bt2.target_hciconfig_init() self.bt2.set_leadv() @@ -51,11 +40,9 @@ def test_bt_gatt_read_primary(self): self.assertEqual(status, 0, msg="gatttool Primary is wrong: %s" % output) def test_bt_gatt_read_characteristics(self): - '''Use gatttool to show target characteristics handles - @fn test_bt_gatt_read_characteristics - @param self - @return - ''' + """ + Use gatttool to show target characteristics handles + """ for i in range(3): self.bt2.target_hciconfig_init() self.bt2.set_leadv() @@ -66,11 +53,9 @@ def test_bt_gatt_read_characteristics(self): self.assertEqual(status, 0, msg="gatttool characteristics fails: %s" % output) def test_bt_gatt_read_handle(self): - '''Use gatttool to read target handle value - @fn test_bt_gatt_read_handle - @param self - @return - ''' + """ + Use gatttool to read target handle value + """ for i in range(3): self.bt2.target_hciconfig_init() self.bt2.set_leadv() @@ -81,11 +66,9 @@ def test_bt_gatt_read_handle(self): self.assertEqual(status, 0, msg="gatttool read handle fails: %s" % output) def test_bt_gatt_connect(self): - '''Use gatttool interactive mode to do connect - @fn test_bt_gatt_connect - @param self - @return - ''' + """ + Use gatttool interactive mode to do connect + """ for i in range(3): self.bt2.target_hciconfig_init() self.bt2.set_leadv() @@ -96,11 +79,9 @@ def test_bt_gatt_connect(self): self.assertEqual(status, 2, msg="gatttool connect fails: %s" % output) def test_bt_remote_gatt_read_primary(self): - '''Use gatttool to show host primary attr handles - @fn test_bt_remote_gatt_read_primary - @param self - @return - ''' + """ + Use gatttool to show host primary attr handles + """ for i in range(3): self.bt1.target_hciconfig_init() self.bt1.set_leadv() @@ -111,11 +92,9 @@ def test_bt_remote_gatt_read_primary(self): 
self.assertEqual(status, 0, msg="gatttool be read primary fails: %s" % output) def test_bt_remote_gatt_read_characteristics(self): - '''Use gatttool to show host characteristics handles - @fn test_bt_remote_gatt_read_characteristics - @param self - @return - ''' + """ + Use gatttool to show host characteristics handles + """ for i in range(3): self.bt1.target_hciconfig_init() self.bt1.set_leadv() @@ -126,11 +105,9 @@ def test_bt_remote_gatt_read_characteristics(self): self.assertEqual(status, 0, msg="gatttool be read characteristics fails: %s" % output) def test_bt_remote_gatt_read_handle(self): - '''Use gatttool to read host handle value - @fn test_bt_remote_gatt_read_handle - @param self - @return - ''' + """ + Use gatttool to read host handle value + """ for i in range(3): self.bt1.target_hciconfig_init() self.bt1.set_leadv() @@ -141,11 +118,9 @@ def test_bt_remote_gatt_read_handle(self): self.assertEqual(status, 0, msg="gatttool be read handle fails: %s" % output) def test_bt_remote_gatt_connect(self): - '''Use gatttool interactive mode to do connect to host - @fn test_bt_remote_gatt_connect - @param self - @return - ''' + """ + Use gatttool interactive mode to do connect to host + """ for i in range(3): self.bt1.target_hciconfig_init() self.bt1.set_leadv() @@ -156,11 +131,9 @@ def test_bt_remote_gatt_connect(self): self.assertEqual(status, 2, msg="gatttool be connected fails: %s" % output) def test_bt_visible(self): - '''Do traditional visible and be scanned by other (not ble scan) - @fn test_bt_visible - @param self - @return - ''' + """ + Do traditional visible and be scanned by other (not ble scan) + """ self.bt1.target.run('hciconfig hci0 noleadv') for i in range(3): # For init function already set visible status, directly be scanned. @@ -174,11 +147,9 @@ def test_bt_visible(self): self.assertEqual(status, 2, msg="Scan remote device fails: %s" % output) def test_bt_scan(self): - '''Scan nearby bluetooth devices (not ble scan) - @fn test_bt_scan - @param self - @return - ''' + """ + Scan nearby bluetooth devices (not ble scan) + """ self.bt2.target.run('hciconfig hci0 noleadv') for i in range(3): # For init function already set visible status, directly be scanned. 
@@ -192,11 +163,9 @@ def test_bt_scan(self): self.assertEqual(status, 2, msg="Scan remote device fails: %s" % output) def test_bt_le_advertising(self): - '''Target does LE advertising, another device scans it - @fn test_bt_le_advertising - @param self - @return - ''' + """ + Target does LE advertising, another device scans it + """ for i in range(3): # close legacy iscan mode self.bt1.target.run('hciconfig hci0 noscan') @@ -217,11 +186,9 @@ def test_bt_le_advertising(self): self.assertEqual(status, 2, msg="Be LE-scanned fails: %s" % output) def test_bt_le_scan(self): - '''Another device (host) does LE advertising, target scans it - @fn test_bt_le_scan - @param self - @return - ''' + """ + Another device (host) does LE advertising, target scans it + """ for i in range(3): # close legacy iscan mode self.bt2.target.run('hciconfig hci0 noscan') @@ -242,11 +209,9 @@ def test_bt_le_scan(self): self.assertEqual(status, 2, msg="LE Scan other fails: %s" % output) def test_bt_pairing(self): - '''Use bluetoothctl to pair IoT device with host - @fn test_bt_pairing - @param self - @return - ''' + """ + Use bluetoothctl to pair IoT device with host + """ # On remote, start pair_slave in back-ground slave_exp = os.path.join(os.path.dirname(__file__), "files/bt_pair_slave_on_iot.exp") cmd = "%s %s %s" % (slave_exp, self.bt2.target.ip, self.bt1.get_bt_mac()) diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py index 77100290a1..5db2f4c6cd 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py @@ -1,49 +1,41 @@ import os import time import subprocess -from oeqa.runtime.bluetooth import bluetooth +from oeqa.runtime.connectivity.bluetooth import bluetooth from oeqa.oetest import oeRuntimeTest from oeqa.utils.helper import shell_cmd_timeout from oeqa.utils.helper import get_files_dir + class BTStabilityTest(oeRuntimeTest): - """ - @class BTStabilityTest - """ + power_cycles = 200 + def setUp(self): - ''' initialize bluetooth class - @fn setUp - @param self - @return - ''' + """ + Initialize bluetooth class + """ self.bt = bluetooth.BTFunction(self.target) - def test_bt_onoff_multiple_time(self): - '''bluetoothctl to power on/off for multiple times - @fn test_bt_onoff_multiple_time - @param self - @return - ''' - time=200 - for i in range(1, time): + def test_bt_onoff_multiple_times(self): + """ + Use bluetoothctl to power on/off multiple times + """ + for i in range(1, self.power_cycles): self.bt.ctl_power_on() self.bt.ctl_power_off() if i % 20 == 0: - print ("Finish %d times, successful." % i) + print ("Finished %d cycles successfuly." % i) - def test_bt_visable_onoff_multiple_time(self): - '''bluetoothctl to turn discoverable on/off for multiple times - @fn test_bt_visable_onoff_multiple_time - @param self - @return - ''' + def test_bt_visible_onoff_multiple_times(self): + """ + Use bluetoothctl to turn discoverable on/off multiple times + """ self.bt.ctl_power_on() - time=200 - for i in range(1, time): - self.bt.ctl_visable_on() - self.bt.ctl_visable_off() + for i in range(1, self.power_cycles): + self.bt.ctl_visible_on() + self.bt.ctl_visible_off() if i % 20 == 0: - print ("Finish %d times, successful." % i) + print ("Finished %d cycles successfuly." 
% i) ## # @} From d915e9a83ede2b93869fe17b81fcdd61927f621c Mon Sep 17 00:00:00 2001 From: Jair Gonzalez Date: Fri, 7 Apr 2017 16:48:45 -0500 Subject: [PATCH 02/21] meta-iotqa: Enable single-node Bluetooth tests Signed-off-by: Jair Gonzalez --- meta-iotqa/conf/test/refkit-image-common.manifest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meta-iotqa/conf/test/refkit-image-common.manifest b/meta-iotqa/conf/test/refkit-image-common.manifest index 1f12a6ecc4..52280dedf9 100644 --- a/meta-iotqa/conf/test/refkit-image-common.manifest +++ b/meta-iotqa/conf/test/refkit-image-common.manifest @@ -3,6 +3,8 @@ oeqa.runtime.core.baseos oeqa.runtime.connectivity.services.ssh oeqa.runtime.connectivity.services.managerdaemon oeqa.runtime.connectivity.bluetooth.btcheck +oeqa.runtime.connectivity.bluetooth.bt_command +oeqa.runtime.connectivity.bluetooth.bt_6lowpan oeqa.runtime.connectivity.wifi.wifi_connect oeqa.runtime.programming.python.apprt_python oeqa.runtime.multimedia.audio.alsa From c12c21becced95a72e3852a4a4e1301cd9547374 Mon Sep 17 00:00:00 2001 From: Jair Gonzalez Date: Fri, 7 Apr 2017 19:08:36 -0500 Subject: [PATCH 03/21] meta-iotqa: Disable Bluetooth after Bluetooth testing Disable Bluetooth after completing Bluetooth testing in order to not interfere with other tests. Signed-off-by: Jair Gonzalez --- .../lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py | 4 ++++ .../lib/oeqa/runtime/connectivity/bluetooth/bt_command.py | 4 ++++ .../oeqa/runtime/connectivity/bluetooth/bt_stability.py | 8 +++++--- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py index 65008a0d46..0af4ee1ab1 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_6lowpan.py @@ -6,8 +6,12 @@ class CommBT6LowPAN(oeRuntimeTest): def setUp(self): self.bt = bluetooth.BTFunction(self.target) + self.bt.enable_bluetooth() self.bt.target_hciconfig_init() + def tearDown(self): + self.bt.disable_bluetooth() + def test_bt_insert_6lowpan_module(self): """ Insert 6lowpan module diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py index 50fa091b11..977250f520 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_command.py @@ -10,8 +10,12 @@ class CommBTTest(oeRuntimeTest): def setUp(self): self.bt = bluetooth.BTFunction(self.target) + self.bt.enable_bluetooth() self.bt.target_hciconfig_init() + def tearDown(self): + self.bt.disable_bluetooth() + def test_bt_power_on(self): """ Enable bluetooth device diff --git a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py index 5db2f4c6cd..7dda54391c 100644 --- a/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py +++ b/meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bt_stability.py @@ -11,10 +11,12 @@ class BTStabilityTest(oeRuntimeTest): power_cycles = 200 def setUp(self): - """ - Initialize bluetooth class - """ self.bt = bluetooth.BTFunction(self.target) + self.bt.target_hciconfig_init() + self.bt.enable_bluetooth() + + def tearDown(self): + self.bt.disable_bluetooth() def test_bt_onoff_multiple_times(self): """ From 6108981d6318392c84a7e73164de27c84747eb20 Mon Sep 17 00:00:00 2001 From: 
Jair Gonzalez Date: Thu, 4 May 2017 12:25:40 -0500 Subject: [PATCH 04/21] meta-refkit: Add bluetoothctl to development images bluetoothctl has been added to the list of bad recommendations to reduce GPLv3 dependencies. However, there are Bluetooth test cases that make use of this tool. This change removes it from the list, only for development images. Signed-off-by: Jair Gonzalez --- meta-refkit-core/classes/refkit-image.bbclass | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/meta-refkit-core/classes/refkit-image.bbclass b/meta-refkit-core/classes/refkit-image.bbclass index 7ae98794dc..728b0c3255 100644 --- a/meta-refkit-core/classes/refkit-image.bbclass +++ b/meta-refkit-core/classes/refkit-image.bbclass @@ -139,6 +139,12 @@ inherit refkit-hash-dm-verity-key # used only in "development" configuration. FEATURE_PACKAGES_common-test = "packagegroup-common-test" +# bluetoothctl is used on "development" configuration to execute some of +# the test cases for Bluetooth. It is enabled here because adding +# it to packagegroup-common-test has no effect due to a bug +# on BAD_RECOMMENDS functionality (YOCTO #11427) +BAD_RECOMMENDATIONS_remove = "${@ 'bluez5-client' if (d.getVar('IMAGE_MODE') or 'production') != 'production' else '' }" + # Additional features and packages used by all profile images # and the refkit-image-common.bb. Not essential for booting # and thus not included in refkit-image-minimal.bb. Product From 89ce7179ad23465fb1b57c8b0add9d27d4ee2e41 Mon Sep 17 00:00:00 2001 From: Jair Gonzalez Date: Thu, 25 May 2017 13:14:09 -0700 Subject: [PATCH 05/21] meta-iotqa: Mask out single node Bluetooth tests on QEMU Bluetooth tests can not be executed on QEMU, so add single node tests to its mask file. Signed-off-by: Jair Gonzalez --- meta-iotqa/conf/test/qemu.mask | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meta-iotqa/conf/test/qemu.mask b/meta-iotqa/conf/test/qemu.mask index c73e5fe87b..d170effed7 100644 --- a/meta-iotqa/conf/test/qemu.mask +++ b/meta-iotqa/conf/test/qemu.mask @@ -1,6 +1,8 @@ # This file contains tests that can't be run on QEMU # /docker/tester-exec.sh will use this file to remove tests from a manifest oeqa.runtime.connectivity.bluetooth.btcheck +oeqa.runtime.connectivity.bluetooth.bt_command +oeqa.runtime.connectivity.bluetooth.bt_6lowpan oeqa.runtime.connectivity.wifi.wifi_connect oeqa.runtime.peripherals.mraa.mraa_gpio oeqa.runtime.multimedia.audio.alsa From 345cfa376886bf275e3ff9723dd9e13853bf678d Mon Sep 17 00:00:00 2001 From: Jair Gonzalez Date: Fri, 23 Jun 2017 16:56:17 -0500 Subject: [PATCH 06/21] meta-iotqa: Mask out single node Bluetooth tests on MinnowBoard Turbot Mask Bluetooth tests on MinnowBoard Turbot as the hci0 interface stops responding when Bluetooth is disabled on the current CI configuration: Expansion board: Silverjaw Lure (SKU 1000) Wireless adapter: Intel Dual Band Wireless-AC 7260 - 7260HMW YOCTO #11714 Signed-off-by: Jair Gonzalez --- meta-iotqa/conf/test/minnowboardturbot.mask | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meta-iotqa/conf/test/minnowboardturbot.mask b/meta-iotqa/conf/test/minnowboardturbot.mask index 395cc02e09..722b20d111 100644 --- a/meta-iotqa/conf/test/minnowboardturbot.mask +++ b/meta-iotqa/conf/test/minnowboardturbot.mask @@ -1,2 +1,4 @@ # This file contains tests that can't be run on Minnowboard Turbot # /docker/tester-exec.sh will use this file to remove tests from a manifest +oeqa.runtime.connectivity.bluetooth.bt_command +oeqa.runtime.connectivity.bluetooth.bt_6lowpan From 
b90b795a69bbfdce1df85b77a5df52fc14458d1e Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Tue, 27 Jun 2017 10:09:44 +0300 Subject: [PATCH 07/21] refkit-ostree: swallow refkit-ostree from its git repository. Pulled in the code for refkit-ostree from its git repository https://github.com/klihub/refkit-ostree-upgrade.git. Axed out shave support. Renamed systemd services, targets (and related files) for slightly better consistency. All the service names are now prefixed with refkit instead of ostree. Fixed 32-bit x86 loader to be correctly named tbootia32.efi. Added a post-apply hook for updating the UEFI combo app if it has changed between the old and the new ostree deployments. Added a hook to put the post-update marker in place which can be used upon the next reboot to trigger post-update checks. Added post-update-check hook script and a final hook that just marks the update successful and ok. Added a post-apply hook to request a reboot after a successful system update. The reboot will activate and boot into the latest ostree deployment. Re- moved the original reboot-hook and all the related code. Entities that need to prevent (= delay) the shutdown to a more convenient time in the future should use systemd-inhibit or the corresponding systemd interfaces for doing so. For instance, if you have an interactive shell while the updater is running and you don't want the system to be rebooted under you in case an update gets pulled in, you should do a systemd-inhibit --what=shutdown $SHELL and exit that new shell, once you're finished and want to allow the reboot/update to proceed. Because of known limitations in systemd-inhibit (see below) also added a systemd service which waits (currently indefinitely) for all systemd-inhibit shutdown locks then reboots the system. This is necessary as systemd-inhibit shutdown locks are ignored both for privileged users (explictly ignore for root) and also for non-interactive invocations of systemctl (to provide backward- compatible behavior for scripts that explicitly run shutdown or reboot which are now just symlinked to systemctl). These are all known limitations of systemd-logind which hosts the inhibitor lock code and API. There are feature requests open to add a logind.conf configuration entry to override the default behavior and obey locks for root and scripts as well. You can refer to https://github.com/systemd/systemd/issues/949, or https://github.com/systemd/systemd/issues/2680 for more details. The actual inhibitable reboot is implemented by the newly added straightforward systemd-inhibitable-reboot script. 
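For example, a minimal sketch of the inhibitor workflow described above (only the systemd-inhibit invocation already mentioned in this message; the --why text is arbitrary):

    # Take a shutdown inhibitor lock held for the lifetime of a new shell.
    # The reboot service waits for all such locks before rebooting into
    # the freshly applied ostree deployment.
    systemd-inhibit --what=shutdown --why="finish interactive work" $SHELL
    # ... do your work in the inhibited shell ...
    exit    # releases the lock and lets the deferred reboot proceed

Keep in mind that, due to the systemd-logind limitations noted above, this only has the desired effect for unprivileged, interactive sessions.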
Signed-off-by: Krisztian Litkey --- .../files/refkit-ostree/LICENSE-BSD | 26 + .../files/refkit-ostree/Makefile.am | 52 + .../files/refkit-ostree/README.md | 2 + .../files/refkit-ostree/bootstrap | 31 + .../files/refkit-ostree/configure.ac | 90 + .../hooks/post-apply.d/00-update-uefi-app | 169 ++ .../hooks/post-apply.d/98-mark-updated | 6 + .../hooks/post-apply.d/99-reboot | 5 + .../hooks/post-update-check.d/99-mark-ok | 4 + .../files/refkit-ostree/hooks/run-hooks | 24 + .../files/refkit-ostree/scripts/refkit-ostree | 714 +++++++ .../scripts/systemd-inhibitable-reboot | 8 + .../refkit-patch-ostree-param.service | 13 + .../services/refkit-reboot.service | 7 + .../services/refkit-update-post-check.service | 14 + .../refkit-update-post-failure.service | 11 + .../refkit-update-post-failure.target | 6 + .../services/refkit-update.service | 13 + .../files/refkit-ostree/src/refkit-ostree.c | 1643 +++++++++++++++++ .../refkit-ostree/refkit-ostree.bb | 42 + .../refkit-ostree/refkit-ostree_git.bb | 36 - 21 files changed, 2880 insertions(+), 36 deletions(-) create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/LICENSE-BSD create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/Makefile.am create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/README.md create mode 100755 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/bootstrap create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/configure.ac create mode 100755 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/00-update-uefi-app create mode 100755 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/98-mark-updated create mode 100755 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/99-reboot create mode 100755 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-update-check.d/99-mark-ok create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/run-hooks create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/refkit-ostree create mode 100755 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/systemd-inhibitable-reboot create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-patch-ostree-param.service create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-reboot.service create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-check.service create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.service create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.target create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update.service create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/src/refkit-ostree.c create mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree.bb delete mode 100644 meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree_git.bb diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/LICENSE-BSD b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/LICENSE-BSD new file mode 
100644 index 0000000000..a52ad554df --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/LICENSE-BSD @@ -0,0 +1,26 @@ +Copyright (c) 2012, 2013, Intel Corporation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/Makefile.am b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/Makefile.am new file mode 100644 index 0000000000..6f69eab760 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/Makefile.am @@ -0,0 +1,52 @@ +HOOK_DIR = ${datadir}/${PACKAGE_NAME}/hooks +AM_CFLAGS = $(WARNING_CFLAGS) -DHOOK_DIR="\"${HOOK_DIR}\"" + +bin_PROGRAMS = +script_SCRIPTS = scripts/refkit-ostree scripts/systemd-inhibitable-reboot +scriptdir = ${datadir}/${PACKAGE_NAME}/scripts + +servicedir = ${SYSTEMD_UNITDIR} +service_DATA = \ + services/refkit-patch-ostree-param.service \ + services/refkit-update.service \ + services/refkit-update-post-check.service \ + services/refkit-reboot.service \ + services/refkit-update-post-failure.service \ + services/refkit-update-post-failure.target + +# refkit-ostree +bin_PROGRAMS += refkit-ostree + +refkit_ostree_SOURCES = src/refkit-ostree.c +refkit_ostree_CFLAGS = $(AM_CFLAGS) $(OSTREE_CFLAGS) +refkit_ostree_LDADD = + +# refkit-ostree-update +bin_PROGRAMS += refkit-ostree-update + +refkit_ostree_update_SOURCES = src/refkit-ostree.c +refkit_ostree_update_CFLAGS = $(AM_CFLAGS) $(OSTREE_CFLAGS) -D__REFKIT_UPDATER__ +refkit_ostree_update_LDADD = $(OSTREE_LIBS) + +install-data-local: + install -m 0755 -d $(DESTDIR)$(HOOK_DIR)/post-apply.d + install -m 0755 -T $(top_srcdir)/hooks/run-hooks \ + $(DESTDIR)$(HOOK_DIR)/post-apply + for h in $(top_srcdir)/hooks/post-apply.d/[0-9]*; do \ + if [ -x $$h ]; then \ + install -m 0755 $$h $(DESTDIR)$(HOOK_DIR)/post-apply.d; \ + fi; \ + done + + install -m 0755 -d $(DESTDIR)$(HOOK_DIR)/post-update-check.d + install -m 0755 -T $(top_srcdir)/hooks/run-hooks \ + $(DESTDIR)$(HOOK_DIR)/post-update-check + for h in $(top_srcdir)/hooks/post-update-check.d/[0-9]*; do \ + if [ -x $$h ]; then \ + install -m 0755 $$h 
$(DESTDIR)$(HOOK_DIR)/post-update-check.d; \ + fi; \ + done + + install -m 0755 -d $(DESTDIR)$(HOOK_DIR)/rollback.d + install -m 0755 -T $(top_srcdir)/hooks/run-hooks \ + $(DESTDIR)$(HOOK_DIR)/rollback diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/README.md b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/README.md new file mode 100644 index 0000000000..c24e7a7bf4 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/README.md @@ -0,0 +1,2 @@ +This package contains extra binaries and scripts used to help booting +and keeping up-to-date devices running IoT Reference OS Kit images. diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/bootstrap b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/bootstrap new file mode 100755 index 0000000000..9f23f9abfb --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/bootstrap @@ -0,0 +1,31 @@ +#!/bin/bash + +mkdir m4 + +aclocal -I . && \ + autoheader && \ + libtoolize --copy --force && \ + autoconf && \ + automake --add-missing --copy + +status=$? + +if [ $status != 0 ]; then + echo "Failed to bootstrap." + exit $status +fi + +if [ -n "$1" ]; then + case $1 in + noconf*|NOCONF*) + exit 0 + ;; + *) + ;; + esac +fi + + +if [ -z "$NOCONFIGURE" ]; then + ./configure $* +fi diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/configure.ac b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/configure.ac new file mode 100644 index 0000000000..5b4486ea3d --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/configure.ac @@ -0,0 +1,90 @@ + +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. + +AC_PREREQ(2.59) + +AC_INIT([refkit-ostree], [0.0.0], [BUG-REPORT-ADDRESS]) + +AC_CONFIG_MACRO_DIR([m4]) +AC_CONFIG_HEADER([src/config.h]) +AM_INIT_AUTOMAKE([-Wno-portability subdir-objects foreign]) + +AC_SUBST(ACLOCAL_AMFLAGS, "-I m4") + +# Disable static libraries. +AC_DISABLE_STATIC + +# Checks for programs. +AC_PROG_CC +AC_PROG_CC_C99 +AC_PROG_INSTALL +AC_PROG_LN_S +AM_PROG_CC_C_O +AM_PROG_LIBTOOL + +# Make first invocation of PKG_CHECK_MODULES 'if-then-else-fi'-safe. +PKG_PROG_PKG_CONFIG + +# Checks for header files. +AC_PATH_X +AC_CHECK_HEADERS([stdio.h stdlib.h stdarg.h string.h locale.h]) + +# Checks for typedefs, structures, and compiler characteristics. + +# Checks for library functions. + + +# Check and enable extra compiler warnings if they are supported. +AC_ARG_ENABLE(extra-warnings, + [ --enable-extra-warnings enable extra compiler warnings], + [extra_warnings=$enableval], [extra_warnings=auto]) + +WARNING_CFLAGS="" +warncflags="-Wall -Wextra" +if test "$extra_warnings" != "no"; then + save_CPPFLAGS="$CPPFLAGS" + for opt in $warncflags; do + AC_PREPROC_IFELSE([AC_LANG_PROGRAM([])], + [WARNING_CFLAGS="$WARNING_CFLAGS $opt"]) + done + CPPFLAGS="$save_CPPFLAGS" +fi + +AC_SUBST(WARNING_CFLAGS) + +# Allow overriding systemds' unitdir. +AC_ARG_WITH([systemdunitdir], + AC_HELP_STRING([--with-systemdunitdir=DIR], [systemd unit directory]), + [with_systemdunitdir=${withval}], + [with_systemdunitdir="`$PKG_CONFIG --variable=systemdsystemunitdir systemd`"]) + +if test -n "${with_systemdunitdir}"; then + SYSTEMD_UNITDIR="${with_systemdunitdir}" + AC_SUBST(SYSTEMD_UNITDIR) +fi + +# Check for OSTree headers and libraries. +PKG_CHECK_MODULES(OSTREE, ostree-1) + +# Allow substitution for LIBDIR and SYSCONFDIR. 
+AC_MSG_CHECKING([libdir]) +AC_MSG_RESULT([$libdir]) +AC_SUBST(LIBDIR, [$libdir]) +AC_MSG_CHECKING([sysconfdir]) +AC_MSG_RESULT([$sysconfdir]) +AC_SUBST(SYSCONFDIR, [$sysconfdir]) +AC_SUBST(DATADIR, [$datadir]) + +# Generate output. +AC_CONFIG_FILES([ + Makefile + ]) +AC_OUTPUT + +# Display the configuration. +echo "----- configuration -----" +echo "Extra C warnings flags: $WARNING_CFLAGS" +echo "Cross-compiling: $cross_compiling" +echo "Systemd unitdir: $SYSTEMD_UNITDIR" +echo "OSTree libraries: $OSTREE_LIBS" diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/00-update-uefi-app b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/00-update-uefi-app new file mode 100755 index 0000000000..dc996bec88 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/00-update-uefi-app @@ -0,0 +1,169 @@ +#!/bin/sh + +# UUIDs used for external and internal root devices. +INTERNAL_UUID='12345678-9abc-def0-0fed-cba987654320' +EXTERNAL_UUID='12345678-9abc-def0-0fed-cba987654321' +# Wheter EFI partition was already mounted. +EFI_PREMOUNTED="" + +# Determine root device. +root_device () { + local _var=${1:-ROOT_DEV} + local _dev + + _dev=$(cat /proc/mounts | grep ' / ' | cut -d ' ' -f 1) + + if [ -z "$_dev" ]; then + return 1 + fi + + _dev=$(realpath $_dev) + + if [ -z "$_dev" ]; then + return 1 + fi + + eval "$_var=\"$_dev\"" + return 0 +} + +# Determine the UUID of /. +root_uuid () { + local _var=${1:-ROOT_UUID} + local _val + + _val=$(cat /proc/cmdline | tr -s '\t' ' ' | tr ' ' '\n' | grep root=) + + if [ -z "$_val" ]; then + return 1 + fi + + _val=${_val#root=} + _val=${_val#PARTUUID=} + + eval "$_var=\"$_val\"" + return 0 +} + +# Determine EFI device (assume 1st partition on the same block device as /). +efi_device () { + local _var=${1:-EFI_DEV} + local _efi + + if ! root_device _efi; then + return 1 + fi + + _efi="${_efi%[0-9]*}1" + + eval "$_var=\"$_efi\"" + return 0 +} + +# Mount the EFI partition. +mount_efi_partition () { + local _path=${1:-/boot/efi} + local _dev + + if [ -d $_path/EFI/BOOT ]; then + EFI_PREMOUNTED=yes + return 0 + fi + + if ! efi_device _dev; then + return 1 + fi + + mount -t vfat $_dev $_path +} + +# Umount the EFI partition (if it was not already premounted). +umount_efi_partition () { + local _path=${1:-/boot/efi} + + if [ -n "$EFI_PREMOUNTED" ]; then + return 0 + fi + + umount $_path +} + +# Get the UEFI combo app name. +uefi_app () { + local _var=${1:-COMBO_APP} + local _arch=$(uname -m) + local _val + + # XXX TODO: we only do x86* for now... + case $_arch in + *64) _val=bootx64.efi;; + *) _val=bootia32.efi;; + esac + + eval "$_var=\"$_val\"" + return 0 +} + +# Update the UEFI combo app if it has changed. +update_uefi_app () { + local _old="$1" + local _new="$2" + local _uuid _arch _app _type _active _n + + if ! root_uuid _uuid; then + return 1 + fi + + if ! uefi_app _app; then + return 1 + fi + + case $_uuid in + $EXTERNAL_UUID) _type=ext;; + $INTERNAL_UUID) _type=int;; + *) return 1;; + esac + + _n=$_new/usr/lib/ostree-boot/$_app.$_type + _active=/boot/efi/EFI/BOOT/$_app + + if cmp $_n-* $_active > /dev/null; then + echo "UEFI combo app already up to date." + return 0 + fi + + echo "Backing up previous UEFI combo app ($_active)..." + $xeq cp $_active $_active.old + echo "Copying new UEFI combo app in place..." + $xeq cp $_n-* $_active + sync + + return $? 
+} + +# main script +while [ "${1#-}" != "$1" -a -n "$1" ]; do + case $1 in + --dry-run|-n) xeq=echo; shift;; + --debug|-d) set -x; shift;; + *) echo "ignoring unknown option $1..."; shift;; + esac +done + +set -e + +if ! mount_efi_partition; then + echo "Failed to mount EFI partition." + exit 1 +fi + +if ! update_uefi_app $1 $2; then + echo "Failed to udpate UEFI combo app." + exit 1 +fi + +if ! umount_efi_partition; then + echo "Failed to umount EFI partition." +fi + +exit 0 diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/98-mark-updated b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/98-mark-updated new file mode 100755 index 0000000000..9bc4dae76c --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/98-mark-updated @@ -0,0 +1,6 @@ +#!/bin/sh + +# Put an update marker in place which will then trigger post-update +# checks upon the next boot. + +echo "$1 $2" > /var/.ostree.updated diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/99-reboot b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/99-reboot new file mode 100755 index 0000000000..541beccce9 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-apply.d/99-reboot @@ -0,0 +1,5 @@ +#!/bin/sh + +echo "Triggering refkit-reboot service for rebooting..." +sync +systemctl start refkit-reboot diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-update-check.d/99-mark-ok b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-update-check.d/99-mark-ok new file mode 100755 index 0000000000..0ad241ef54 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/post-update-check.d/99-mark-ok @@ -0,0 +1,4 @@ +#!/bin/sh + +echo "Marking the latest update as successful and working..." +mv /var/.ostree.updated /var/.ostree.update-ok diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/run-hooks b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/run-hooks new file mode 100644 index 0000000000..c298684d3d --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/hooks/run-hooks @@ -0,0 +1,24 @@ +#!/bin/sh + +if [ $# = 0 -a "${0##*/}" = "post-update-check" ]; then + read prev curr < /var/.ostree.updated + set $prev $curr +fi + +HOOK_DIR=$0.d +HOOK_TYPE=${0##*/} + +for h in $HOOK_DIR/[0-9]*-*; do + if [ -e $h -a -x $h ]; then + hook=${h##*/} + echo "Executing $HOOK_TYPE hook $hook..." + $h $* + status=$? + if [ $status != 0 ]; then + echo "hook $hook failed." + exit $? + fi + fi +done + +exit 0 diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/refkit-ostree b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/refkit-ostree new file mode 100644 index 0000000000..db5f328598 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/refkit-ostree @@ -0,0 +1,714 @@ +#!/bin/sh + +# Partition UUIDs for internal storage and removable media. +INTERNAL_UUID='12345678-9abc-def0-0fed-cba987654320' +EXTERNAL_UUID='12345678-9abc-def0-0fed-cba987654321' + +# Our distro name. 
+DISTRO='refkit' + +# update poll interval +POLL_INTERVAL=30 + +# upgrade poll and apply markers +POLL_MARKER=/run/ostree.polling +APPLY_MARKER=/run/ostree.applying +POST_UPDATE_MARKER=/var/.ostree.updated + +# Print an informational message. +info_msg () { + [ -n "$INITRAMFS" ] && echo "$*" > /dev/console || echo "$*" +} + +# Print an error message. +error_msg () { + [ -n "$INITRAMFS" ] && echo "$*" > /dev/console || echo "error: $*" +} + +# Print a fatal error message, starting a shell if we're in development mode. +fatal_msg () { + if [ -n "$INITRAMFS" ]; then + if grep -q "imagetype=development" /proc/cmdline; then + error_msg "$*" + /bin/sh + else + echo "$*" > /dev/console + exit 1 + fi + else + echo "fatal error: $*" + exit 1 + fi +} + +# Print a debug message. +debug_msg () { + if [ -z "$DEBUG" ]; then + return 0 + fi + [ -n "$INITRAMFS" ] && echo "$*" > /dev/console || echo "D: $*" +} + +# Stat a filesystem entry for device and and inode number. +dev_inode () { + local _path="$1" + local _var="${2:-DEVNODE}" + local _dev="" + local _inode="" + local _line="$(stat $_path | grep ^Device: | tr -s '\t' ' ')" + + if [ -z "$_line" ]; then + return 1 + fi + + _line=${_line#Device: } + _dev=${_line%%/*} + _line=${_line#*Inode: } + _inode=${_line%% *} + + eval "${_var}=\"$_dev:$_inode\"" + + return 0 +} + +# Determine the device corresponding to the currently mounted root. +root_device () { + local _var="${1:-ROOTDEV}" + local _dev + + _dev=$(cat /proc/mounts | grep ' / ' | cut -d ' ' -f 1) + + if [ -z "$_dev" ]; then + return 1 + fi + + _dev=$(realpath $_dev) + + eval "$_var=\"$_dev\"" + return 0 +} + +# Determine the EFI device path (part #1 on the same block device as our root). +efi_device () { + local _var="${1:-EFIDEV}" + local _efi + + if ! root_device _efi; then + return 1 + fi + + _efi="${_efi%[0-9]*}1" + + eval "$_var=\"$_efi\"" + return 0 +} + +# Determine root partition UUID +root_uuid () { + local _var="${1:-ROOT_UUID}" + local _val + + _val=$(cat /proc/cmdline | tr -s '\t' ' ' | tr ' ' '\n' | grep root=) + + if [ -z "$_val" ]; then + return 1 + fi + + _val=${_val#root=PARTUUID=} + + eval "$_var=\"$_val\"" + return 0 +} + +# Discover BootLoaderSpec-compliant boot loader entries. +ostree_loader_entries () { + local _boot=${1:-/boot} + local _evar=${2:-ENTRY} + local _ovar=${3:-PATH} + local _ecurr=""; _eprev="" + local _ocurr=""; _oprev="" + local _vcurr=0; _vprev=0 + local _e _v _o + + # Discover all loader entries $1/loader/entries/ostree-*.conf. + # Parse the found ones for version and kernel command-line then + # pick the two most recent ones. + for _e in $_boot/loader/entries/ostree-*.conf; do + _v=$(grep '^version ' $_e) + _v=${_v#version } + _o=$(grep '^options ' $_e) + _o=${_o#options } + + if [ $_v -gt $_vcurr ]; then + _vprev=$_vcurr + _eprev=$_ecurr + _oprev=$_ocurr + _vcurr=$_v + _ecurr=$_e + _ocurr=$_o + elif [ $_v -gt $_vprev ]; then + _vprev=$_v + _eprev=$_e + _oprev=$_o + fi + done + + if [ -z "$_ecurr" ]; then + return 1 + fi + + eval "${_evar}_CURR=\"$_ecurr\"" + eval "${_evar}_PREV=\"$_eprev\"" + eval "${_ovar}_CURR=\"${_ocurr#ostree=}\"" + eval "${_ovar}_PREV=\"${_oprev#ostree=}\"" + + return 0 +} + +# Get the canonical (real) path of an ostree deployment/boot entry. +ostree_canonical_path () { + local _var=${2:-CANONICAL} _c + + if [ -z "$1" ]; then + return 1 + fi + + _c=$(realpath $1) + + if [ -z "$_c" ]; then + return 1 + fi + + eval "$_var=\"$_c\"" + return 0 +} + +# Get the boot entry path of an ostree deployment. 
+ostree_boot_path () { + local _var="${2:-BOOT}" _b + local _pnode _bnode + + if ! dev_inode $1 _pnode; then + return 1 + fi + + for _b in /sysroot/ostree/boot.0/$DISTRO/*/?; do + if ! dev_inode $_b/. _bnode; then + return 1 + fi + if [ "$_pnode" = "$_bnode" ]; then + eval "$_var=\"${_b#/sysroot}\"" + return 0 + fi + done + + return 1 +} + +# Discover currently running ostree entry. +ostree_running_entry () { + local _var="${1:-RUNNING}" + local _r_stat _e_stat + + if ! dev_inode / _r_stat; then + return 1 + fi + + for _e in /sysroot/ostree/deploy/$DISTRO/deploy/*.?; do + if ! dev_inode $_e _e_stat; then + return 1 + fi + + if [ "$_r_stat" = "$_e_stat" ]; then + eval "$_var=\"${_e#/sysroot}\"" + return 0 + fi + done + + return 1 +} + +# Make the given directories/mount points movable. +make_movable () { + for _d in $*; do + debug_msg "making $_d movable..." + mount --bind $_d $_d || return 1 + done + + return 0 +} + +# Fill in the missing details in an ostree deployment root. +prepare_root () { + local _root=$1 + + # + # We need to prepare the ostree deployment root to be kosher for + # pivot-rooting and thus eventually becoming the final root. + # This involves stitching it together from several pieces to fill + # in the missing details. Currently we do this as follows: + # + # - bind-mount the common /var into the deployment + # - bind-mount the common /boot into the deployment + # - bind-mount the common /home into the deployment + + debug_msg "preparing $_root for pivot-rooting..." + + cd $_root + + mount --bind ../../var var || return 1 + mount --bind /rootfs/boot $_root/boot || return 1 + mount --bind /rootfs/home $_root/home || return 1 + + cd - > /dev/null + + return 0 +} + +# Shuffle the ostree deployment around so that we eventually pivot-root it. +shuffle_root () { + local _root="$1" + + # + # This code mimicks the last bits of ostree-prepare-root. It shuffles + # /sysroot, /rootfs, and the chosen ostree deployment root ($_root) around + # so that /rootfs becomes $_root/sysroot and $_root becomes /rootfs. Then + # eventually after the final pivot-root we end up with + # + # - current $_root (IOW the ostree deployment root) as / + # - current /rootfs as /sysroot + # + # The only tricky/difficult to follow extra bit here is that we have to + # do these operations with an extra step to avoid trying to move the + # eventual /sysroot (now /rootfs) under itself. + + debug_msg "shuffling /sysroot, /rootfs and $_root for pivot-rooting..." + + cd $_root + + # make /rootfs $_root/sysroot and $_root /rootfs + mkdir -p /sysroot.tmp || return 1 + mount --move $_root /sysroot.tmp || return 1 + mount --move /rootfs sysroot || return 1 + mount --move . /rootfs || return 1 + + cd - > /dev/null + + return 0 +} + +# Prepare the initramfs environment for pivot-rooting into an ostree deployment. +initramfs_prepare_root () { + local _entry_CURR _entry_PREV _path_CURR _path_PREV + + info_msg "* ostree: preparing ostree rootfs for booting..." + + # Discover, and parse loader entries. + if ! ostree_loader_entries /rootfs/boot _entry _path; then + fatal_msg "failed to discover loader entries" + fi + + # Pick the ostree deployment root from the latest entry. + OSTREE_ROOT="$_path_CURR" + info_msg "active ostree entry: $OSTREE_ROOT" + + # Prepare the deployment root for becoming the final runtime root, then + # shuffle /rootfs, /sysroot and the deployment root around so that we + # finally end up pivot-rooting into the deployment with /rootfs ending + # up as /sysroot under it. 
+ make_movable /rootfs$OSTREE_ROOT + prepare_root /rootfs$OSTREE_ROOT + shuffle_root /rootfs$OSTREE_ROOT +} + +# Patch /proc/cmdline with the right ostree kernel argument. +patch_proc_cmdline () { + local _tmpdir=${1:-/run} + local _options="$2" + local _patched=$_tmpdir/cmdline.patched + + info_msg "* Patching /proc/cmdline with ostree arguments..." + + if [ -z "$_options" ]; then + if ! ostree_loader_entries /boot ENTRY OPTIONS; then + error_msg "failed to discover loader entries" + return 1 + else + _options="$OPTIONS_CURR" + fi + fi + + if cat /proc/cmdline | grep -q ostree=; then + if cat /proc/cmdline | grep -q $_options; then + return 0 + else + return 1 + fi + fi + + cat /proc/cmdline | tr -d '\n' > $_patched + echo " ostree=$_options" >> $_patched + chmod og-w $_patched + mount -o ro --bind $_patched /proc/cmdline + + return $? +} + +# Undo patching /proc/cmdline with ostree arguments. +reset_proc_cmdline () { + info_msg "* Resetting /proc/cmdline..." + + while cat /proc/mounts | grep -q /proc/cmdline; do + if ! umount /proc/cmdline; then + return 1 + fi + done + + return 0 +} + +# Mount the EFI partition under the given path or /boot/efi. +mount_efi () { + local _path="${1:-/boot/efi}" + local _dev + + info_msg "* Mounting EFI partition (at $_path)..." + + if cat /proc/mounts | grep ' vfat ' | grep -q " $_path "; then + return 0 + fi + + if ! efi_device _dev; then + return 1 + fi + + mount -t vfat $_dev $_path +} + +# Fetch any pending updates without applying them. +fetch_updates () { + info_msg "* Fetching OS updates..." + + touch $POLL_MARKER + ostree admin upgrade --pull-only + rm -f $POLL_MARKER +} + +# Apply any locally pending updates. +apply_updates () { + local _latest _entry_CURR _entry_PREV _path_CURR _path_PREV _running + local _l _r + + info_msg "* Applying pending OS updates..." + + if ! ostree_running_entry _running; then + fatal_msg "failed to discover running entry" + fi + + info_msg "running version: $_running" + + reset_proc_cmdline + if ! patch_proc_cmdline /run "ostree=$_running"; then + return 1 + fi + + touch $APPLY_MARKER + ostree admin upgrade --deploy-only + rm -f $APPLY_MARKER + + if ! ostree_loader_entries /boot _entry _path; then + fatal_msg "failed to discover loader entries" + fi + + _latest=$(realpath $_path_CURR) + + info_msg "latest version: $_latest" + + dev_inode $_running _r + dev_inode $_latest _l + + if [ "$_l" = "$_r" ]; then + info_msg "no updates, $_running is already running" + return 0 + fi +} + +# Check and update the UEFI combo app if necessary. +update_uefi_app () { + local _entry_CURR _entry_PREV _path_CURR _path_PREV + local _root _uuid _arch _active _latest _efiboot _ostreeboot _ok + + info_msg "* Updating UEFI combo app..." + + if ! ostree_loader_entries /boot _entry _path; then + fatal_msg "failed to discover loader entries" + fi + + if ! mount_efi; then + fatal_msg "failed to mount EFI partition" + fi + + if ! root_uuid _uuid; then + fatal_msg "failed to determine root partition UUID" + fi + + _root=${_path_CURR} + _arch=$(uname -m) + + case $_arch in + *64) _active=bootx64.efi;; + *) _active=bootia32.efi;; + esac + + case $_uuid in + $INTERNAL_UUID) _latest="$_active.int";; + $EXTERNAL_UUID) _latest="$_active.ext";; + *) fatal_msg "unexpected root partition UUID $_uuid";; + esac + + _efiboot=/boot/efi/EFI/BOOT + _ostreeboot=$_root/usr/lib/ostree-boot + + if cmp $_efiboot/$_active $_ostreeboot/$_latest* > /dev/null; then + info_msg "UEFI combo app already up to date." + return 0 + fi + + info_msg "Updating UEFI combo app..." 
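+    # Save the currently active combo app first so it can be restored
+    # if copying the new one fails.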
+ if ! cp $_efiboot/$_active $_efiboot/$_active.old; then + fatal_msg "Failed to save old UEFI combo app." + fi + + # Leap of faith... + cp $_ostreeboot/$_latest* $_efiboot/$_active + _ok=$? + + if [ $_ok != 0 ]; then + # Ugh, we're screwed... try to restore the original. + mv $_efiboot/$_active.old $_efiboot/$_active + fi + + sync + + if [ $_ok != 0 ]; then + fatal_msg "Failed to update UEFI combo app." + fi + + info_msg "UEFI combo app updated." + + return 0 +} + +# Pull and apply any updates, update UEFI combo app if necessary. +update () { + info_msg "* Updating OS..." + + fetch_updates && apply_updates && update_uefi_app +} + +# SIGUSR handler +sigusr1_handler () { + info_msg "* Received SIGUSR1, polling for updates" + fetch_updates +} + +# Sit around in a loop sleeping and periodically polling for updates. +update_daemon () { + local _i _n + + trap "sigusr1_handler" SIGUSR1 + + # signal(1) does not have an option to exit on receipt of signals. + # Hence, a naive loop with a single sleep(1) would only get around + # to handle any received signals once the sleep is done... which in + # our case would be completely pointless. Therefore we kludge this + # around by breaking up our interval in 10 second chunks to let our + # SIGUSR1 handler kick in with a worst-case delay of 10 seconds. + + info_msg "Entering update poll loop (interval: $POLL_INTERVAL seconds)" + + _n=$(($POLL_INTERVAL / 10)) + while true; do + info_msg "* Polling for available upgdates..." + fetch_updates + _i=0 + while [ $_i -lt $_n ]; do + sleep 10 + let _i=$_i+1 + done + done +} + +# Roll back to the previously active version. +rollback () { + info_msg "* Rollback not implemented..." +} + +# Perform a post-update check. +post_update_check () { + if [ -e $POST_UPDATE_MARKER ]; then + info_msg "* Performing post-update check..." + rm -f $POST_UPDATE_MARKER + else + info_msg "* Post-update marker ($POST_UPDATE_MARKER) not present..." + fi + + return 0 +} + +# Perform a post-update failure/recovery actions if an update failed. +post_update_recovery () { + info_msg "* Performing post-update failure recovery..." + rollback +} + +# Determine the curently running version. +running_version () { + local _entry _bp + + if ! ostree_running_entry _entry; then + fatal_msg "failed to discover running entry" + fi + + info_msg "Running entry: $_entry" + + if ! ostree_boot_path /; then + error_msg "failed to determine boot path using stat" + return 1 + fi + + _bp=$(readlink $BOOT) + info_msg "Boot path: $BOOT ($_bp)" + + return 0 +} + +# Determine the latest version. +latest_version () { + local _entry_CURR _entry_PREV _path_CURR _path_PREV + + if ! ostree_loader_entries /boot _entry _path; then + fatal_msg "failed to discover running entry" + fi + + info_msg "Latest entry: $_path_CURR" +} + +# Print minimal help on usage. +print_help () { + echo "$0 [--debug] command" + echo "" + echo "The possible commands are:" + echo " prepare-root: prepare root for booting" + echo " patch-proc-cmdline: patch /proc/cmdline with ostree=..." + echo " mount-efi: mount the right EFI partition on /boot/efi/EFI" + echo " fetch-updates: fetch updates without applying them" + echo " apply-updates: apply fetched pending updates" + echo " update-uefi-app: apply UEFI app if necessary" + echo " update: fetch and apply updates, update UEFI app" + echo " update-daemon: sit in a loop, fetching updates." 
+ echo " rollback: not implemented" + echo " running-version: show the running ostree deployment" + echo " latest-version: show the latest available deployment" + echo " *: patch /proc/cmdline, then exec ostree with the given arguments" +} + + +############################################# +# main script + +if [ -d /rootfs ]; then + INITRAMFS=yes +fi + +# Parse and remove command-line options. +while [ "${1#-}" != "$1" ]; do + case $1 in + -d|--debug) + DEBUG=true + shift + ;; + -h|--help) + print_help + exit 0 + ;; + --poll-interval|-i) + POLL_INTERVAL=$2 + shift 2 + ;; + --) + shift + break + ;; + *) + echo "Unknown option \'$1\', assuming an ostree native option." + break + ;; + esac +done + +if [ "$1" = "-d" -o "$1" = "--debug" ]; then + DEBUG=true + shift +fi + +case $1 in + initramfs-prepare-root|prepare-root) + initramfs_prepare_root + ;; + + patch-proc-cmdline) + patch_proc_cmdline + ;; + + mount-efi) + mount_efi + ;; + + fetch-updates) + fetch_updates + ;; + + apply-updates) + apply_updates + ;; + + update-uefi-app) + update_uefi_app + ;; + + update) + update + ;; + + update-daemon) + update_daemon + ;; + + rollback) + ;; + + post-update-check) + post_update_check + ;; + + post-update-recovery) + post_update_recovery + ;; + + running-version|running) + running_version + ;; + + latest-version|latest) + latest_version + ;; + + # pass the rest directly to ostree + *) + patch_proc_cmdline + exec ostree $* + ;; +esac + +exit $? diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/systemd-inhibitable-reboot b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/systemd-inhibitable-reboot new file mode 100755 index 0000000000..fa4348e48d --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/scripts/systemd-inhibitable-reboot @@ -0,0 +1,8 @@ +#!/bin/sh + +# Wait for systemd-inhibit shutdown locks, then reboot. 
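+# The updater holds a systemd shutdown inhibitor while it is fetching or
+# applying an update, so keep polling until no shutdown inhibitor is left
+# before actually rebooting.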
+while systemd-inhibit --list | grep What: | grep -q shutdown; do + sleep 10 +done + +systemctl reboot diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-patch-ostree-param.service b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-patch-ostree-param.service new file mode 100644 index 0000000000..3d140bb342 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-patch-ostree-param.service @@ -0,0 +1,13 @@ +[Unit] +Description=Patch kernel command line with ostree parameter +DefaultDependencies=no +After=systemd-remount-fs.service systemd-tmpfiles-setup.service tmp.mount +Before=sysinit.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/refkit-ostree-update --patch-procfs +RemainAfterExit=yes + +[Install] +WantedBy=sysinit.target diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-reboot.service b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-reboot.service new file mode 100644 index 0000000000..45654b2dfe --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-reboot.service @@ -0,0 +1,7 @@ +[Unit] +Description=Systemd-inhibitable reboot service +DefaultDependencies=no + +[Service] +Type=simple +ExecStart=/usr/share/refkit-ostree/scripts/systemd-inhibitable-reboot diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-check.service b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-check.service new file mode 100644 index 0000000000..5f12835e92 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-check.service @@ -0,0 +1,14 @@ +[Unit] +Description=Perform post-update checks at the first boot after an update. 
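+# Started only when the post-update marker /var/.ostree.updated is present.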
+After=network.target +ConditionPathExists=/var/.ostree.updated + +[Service] +Type=oneshot +ExecStart=/usr/share/refkit-ostree/hooks/post-update-check +# OnFailure=ostree-post-update-failure.target +RemainAfterExit=Yes +TimeoutSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.service b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.service new file mode 100644 index 0000000000..672d066671 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.service @@ -0,0 +1,11 @@ +[Unit] +Description=Handle post-update check failure (potential rollback) +DefaultDependencies=no +Conflicts=shutdown.target +Conflicts=rescue.service +Conflicts=syslog.socket +Before=shutdown.target + +[Service] +ExecStart=/usr/bin/refkit-ostree-update --rollback +Type=simple diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.target b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.target new file mode 100644 index 0000000000..b0d67d920a --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update-post-failure.target @@ -0,0 +1,6 @@ +[Unit] +Description=OSTree Post-Update Failure Mode +Documentation=man:systemd.special(7) +Requires=ostree-post-update-failure.service +After=emergency.service +AllowIsolate=yes diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update.service b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update.service new file mode 100644 index 0000000000..d78fabd9b1 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/services/refkit-update.service @@ -0,0 +1,13 @@ +[Unit] +Description=RefKit Base OS Updater + +[Service] +Type=simple +StandardInput=null +StandardOutput=journal +StandardError=inherit +RemainAfterExit=no +ExecStart=/usr/bin/refkit-ostree-update --check-interval 300 + +[Install] +WantedBy=multi-user.target diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/src/refkit-ostree.c b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/src/refkit-ostree.c new file mode 100644 index 0000000000..f40e60260f --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/files/refkit-ostree/src/refkit-ostree.c @@ -0,0 +1,1643 @@ +/* + * Copyright (c) 2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#define _GNU_SOURCE /* getopt_long */ +#include +#include +#include +#include +#include +#include +#include + +#include + +/* defaults */ +#define UPDATER_HOOK(path) HOOK_DIR"/"path +#define UPDATER_HOOK_APPLY UPDATER_HOOK("post-apply") +#define UPDATER_INTERVAL (15 * 60) +#define UPDATER_DISTRO "refkit" +#define UPDATER_PREFIX "REFKIT_OSTREE" + +/* updater modes */ +enum { + UPDATER_MODE_DEFAULT, + UPDATER_MODE_FETCH = 0x1, /* only fetch, don't apply */ + UPDATER_MODE_APPLY = 0x2, /* don't fetch, only apply */ + UPDATER_MODE_UPDATE = 0x3, /* fetch and apply updates */ + UPDATER_MODE_ENTRIES, /* parse and show boot entries */ + UPDATER_MODE_RUNNING, /* show running entry */ + UPDATER_MODE_LATEST, /* show running entry */ + UPDATER_MODE_PATCH, /* patch /proc/cmdline */ + UPDATER_MODE_PREPARE, /* prepare root from initramfs */ +}; + +/* printing modes */ +enum { + PRINT_HUMAN_READABLE, /* for human/primate consumption */ + PRINT_SHELL_EVAL, /* for shell eval */ + PRINT_SHELL_EXPORT, /* for shell eval, exporting */ +}; + +/* a boot entry */ +typedef struct { + int id; /* 0/1 entry id */ + int version; /* entry version */ + char *options; /* entry options */ + char *boot; /* boot path */ + char *deployment; /* resolved to deployment */ + dev_t device; /* device number */ + ino_t inode; /* inode number */ +} boot_entry_t; + +/* updater runtime context */ +typedef struct { + int mode; /* mode of operation */ + int interval; /* update check interval */ + int oneshot; /* run once, then exit */ + const char *distro; /* distro name */ + OstreeRepo *repo; /* ostree repo instance */ + OstreeSysroot *sysroot; /* ostree sysroot instance */ + OstreeSysrootUpgrader *u; /* ostree sysroot upgrader */ + const char *hook_apply; /* post-update script */ + int inhibit_fd; /* shutdown inhibitor pid */ + int inhibit_pid; /* active inhibitor process */ + const char *argv0; /* us... 
*/ + boot_entry_t entries[2]; /* boot entries */ + int nentry; + int latest; /* latest boot entry */ + int running; /* running boot entry */ + int print; /* var/setup printing mode */ + const char *prefix; /* shell variable prefix */ +} context_t; + +/* fd redirection for child process */ +typedef struct { + int parent; /* original file descriptor */ + int child; /* dupped to this one */ +} redirfd_t; + +/* a file/directory, potentially under a potentially prefixed root */ +typedef struct { + const char *prefix; + const char *root; + const char *path; +} path_t; + + +/* log levels, current log level */ +enum { + UPDATER_LOG_NONE = 0x00, + UPDATER_LOG_FATAL = 0x01, + UPDATER_LOG_ERROR = 0x02, + UPDATER_LOG_WARN = 0x04, + UPDATER_LOG_INFO = 0x08, + UPDATER_LOG_DEBUG = 0x10, + UPDATER_LOG_ALL = 0x1f, + UPDATER_LOG_DAEMON = UPDATER_LOG_WARN|UPDATER_LOG_ERROR|UPDATER_LOG_FATAL, + UPDATER_LOG_CONSOLE = UPDATER_LOG_INFO|UPDATER_LOG_DAEMON, +}; + +static int log_mask; + +/* logging macros */ +#define log_fatal(...) do { \ + log_msg(UPDATER_LOG_FATAL, __VA_ARGS__); \ + exit(1); \ + } while (0) +#define log_error(...) log_msg(UPDATER_LOG_ERROR, __VA_ARGS__) +#define log_warn(...) log_msg(UPDATER_LOG_WARN , __VA_ARGS__) +#define log_info(...) log_msg(UPDATER_LOG_INFO , __VA_ARGS__) +#define log_debug(...) log_msg(UPDATER_LOG_DEBUG, __VA_ARGS__) + +/* macro to tag unused variables */ +#define UNUSED_VAR(v) (void)v + + +static void log_msg(int lvl, const char *fmt, ...) +{ + static const char *prefix[] = { + [UPDATER_LOG_FATAL] = "fatal error: ", + [UPDATER_LOG_ERROR] = "error: ", + [UPDATER_LOG_WARN] = "warning: ", + [UPDATER_LOG_INFO ] = "", + [UPDATER_LOG_DEBUG] = "D: ", + }; + FILE *out; + va_list ap; + + if (!(log_mask & lvl) || lvl < UPDATER_LOG_NONE || lvl > UPDATER_LOG_DEBUG) + return; + + switch (lvl) { + case UPDATER_LOG_DEBUG: + case UPDATER_LOG_INFO: + out = stdout; + break; + default: + out = stderr; + break; + } + + fputs(prefix[lvl], out); + va_start(ap, fmt); + vfprintf(out, fmt, ap); + va_end(ap); + fputc('\n', out); + fflush(out); +} + + +#ifdef __REFKIT_UPDATER__ +static void log_handler(const gchar *domain, GLogLevelFlags level, + const gchar *message, gpointer user_data) +{ + static int map[] = { + [G_LOG_LEVEL_CRITICAL] = UPDATER_LOG_FATAL, + [G_LOG_LEVEL_ERROR] = UPDATER_LOG_ERROR, + [G_LOG_LEVEL_WARNING] = UPDATER_LOG_WARN, + [G_LOG_LEVEL_MESSAGE] = UPDATER_LOG_INFO, + [G_LOG_LEVEL_INFO] = UPDATER_LOG_INFO, + [G_LOG_LEVEL_DEBUG] = UPDATER_LOG_DEBUG, + }; + int fatal, lvl; + + UNUSED_VAR(user_data); + + fatal = level & G_LOG_FLAG_FATAL; + level &= G_LOG_LEVEL_MASK; + + if (level < 0 || level >= (int)(sizeof(map) / sizeof(map[0]))) + return; + + if (fatal) + lvl = UPDATER_LOG_FATAL; + else + lvl = map[level]; + + if (lvl == UPDATER_LOG_DEBUG) + log_debug("[%s] %s", domain, message); + else + log_msg(lvl, "%s", message); +} +#endif + + +static void set_defaults(context_t *c, const char *argv0) +{ + if (isatty(fileno(stdout))) + log_mask = UPDATER_LOG_CONSOLE; + else + log_mask = UPDATER_LOG_DAEMON; + + memset(c, 0, sizeof(*c)); + c->mode = UPDATER_MODE_DEFAULT; + c->interval = UPDATER_INTERVAL; + c->hook_apply = UPDATER_HOOK_APPLY; + c->argv0 = argv0; + c->distro = UPDATER_DISTRO; + c->prefix = UPDATER_PREFIX; + c->nentry = 0; + c->latest = -1; + c->running = -1; +} + + +#define OPTION_FETCH "-F/--fetch-only" +#define OPTION_APPLY "-A/--apply-only" +#define OPTION_ENTRIES "-b/boot-entries" +#define OPTION_RUNNING "-r/running-entry" +#define OPTION_LATEST "-L/latest-entry" +#define 
OPTION_PATCH "-p/patch-procfs" +#define OPTION_PREPARE "-I/--prepare-root" + +static const char *mode_option(int mode) +{ + switch (mode) { + case UPDATER_MODE_FETCH: return OPTION_FETCH; + case UPDATER_MODE_APPLY: return OPTION_APPLY; + case UPDATER_MODE_ENTRIES: return OPTION_ENTRIES; + case UPDATER_MODE_RUNNING: return OPTION_RUNNING; + case UPDATER_MODE_LATEST: return OPTION_LATEST; + case UPDATER_MODE_PATCH: return OPTION_PATCH; + case UPDATER_MODE_PREPARE: return OPTION_PREPARE; + default: return "WTF?"; + } +} + + +static void set_mode(context_t *c, int mode) +{ + if (c->mode) + log_warn("multiple modes specified (%s, %s), using last one", + mode_option(c->mode), mode_option(mode)); + + c->mode = mode; +} + + +static void print_usage(const char *argv0, int exit_code, const char *fmt, ...) +{ + va_list ap; + context_t c; + + if (fmt != NULL) { + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + fputc('\n', stderr); + va_end(ap); + } + + fprintf(stderr, "usage: %s [options]\n" + "\n" + "The possible options are:\n" + " -b, --boot-entries list boot entries\n" + " -r, --running-entry show running entry\n" + " -L, --latest-entry show latest local available entry\n" + " -p, --patch-procfs patch /proc/cmdline\n" + " -I, --prepare-root prepare root (from initramfs)\n" + " -s, --shell list/show as shell assignment\n" + " -S, --shell-export use export in shell assignments\n" + " -V, --prefix variable prefix in assignments\n" +#ifdef __REFKIT_UPDATER__ + " -F, --fetch-only fetch without applying updates\n" + " -A, --apply-only don't fetch, apply cached updates\n" + " -O, --one-shot run once, then exit\n" + " -i, --check-interval update check interval (in seconds)\n" + " -P, --post-apply-hook PATH script to run after an update\n" +#endif + " -l, --log LEVELS set logging levels\n" + " -v, --verbose increase loggin verbosity\n" + " -d, --debug [DOMAINS] enable given debug domains or all\n" + " -h, --help print this help on usage\n", + argv0); + + set_defaults(&c, argv0); + +#ifdef __REFKIT_UPDATER__ + fprintf(stderr, "\nThe defaults are:\n" + " distro name: %s\n" + " post-apply hook: %s\n" + " shell variable prefix: %s\n" + " check interval: %d\n", + c.distro, + c.hook_apply, + c.prefix, + c.interval); +#endif + + exit(exit_code); +} + + +static int parse_log_levels(const char *levels) +{ + const char *l, *e, *n; + int c, mask; + + if (!strcmp(levels, "none")) + return UPDATER_LOG_NONE; + if (!strcmp(levels, "all")) + return UPDATER_LOG_ALL; + + for (mask = 0, l = levels; l != NULL; l = n) { + e = strchr(l, ','); + if (e == NULL) + n = NULL; + else + n = e + 1; + + if ((c = e - l) == 0) + continue; + + switch (c) { + case 4: + if (!strncmp(l, "none", 4)) + continue; + else if (!strncmp(l, "info", 4)) + mask |= UPDATER_LOG_INFO; + else if (!strncmp(l, "warn", 4)) + mask |= UPDATER_LOG_WARN; + else + goto ignore_unknown; + break; + + case 5: + if (!strncmp(l, "debug", 5)) + mask |= UPDATER_LOG_DEBUG; + else if (!strncmp(l, "error", 5)) + mask |= UPDATER_LOG_ERROR; + else if (!strncmp(l, "fatal", 5)) + mask |= UPDATER_LOG_FATAL; + else + goto ignore_unknown; + break; + + case 6: + if (!strncmp(l, "daemon", 6)) + mask |= UPDATER_LOG_DAEMON; + else + goto ignore_unknown; + break; + + case 7: + if (!strncmp(l, "console", 7)) + mask |= UPDATER_LOG_CONSOLE; + else + goto ignore_unknown; + break; + + default: + ignore_unknown: + log_error("unknown log level %*.*s", c, c, l); + return log_mask; + } + } + + return mask; +} + + +static void enable_debug_domains(char **domains) +{ + static char debug[1024]; + 
char **dom, *p; + const char *t; + int l, n; + + p = debug; + l = sizeof(debug); + for (dom = domains, t = ""; *dom && l > 0; dom++, t = ",") { + n = snprintf(p, l, "%s%s", t, *dom); + + if (n < 0 || n >= l) { + *p = '\0'; + l = 0; + } + else { + p += n; + l -= n; + } + } + + log_mask |= UPDATER_LOG_DEBUG; + + log_debug("enabling debug domains '%s'", debug); + setenv("G_MESSAGES_DEBUG", debug, TRUE); +} + + +static int parse_boot_entry(FILE *fp, boot_entry_t *b) +{ + char line[512], path[PATH_MAX], *p, *e; + int l; + + free(b->options); + free(b->boot); + free(b->deployment); + b->options = b->boot = b->deployment = NULL; + + b->version = 0; + b->device = 0; + b->inode = 0; + + while (fgets(line, sizeof(line), fp) != NULL) { + log_debug("read config entry line '%s'"); + + if (!strncmp(line, "options ", 8)) { + p = line + 8; + e = strchr(line, '\n'); + l = e ? e - p : (int)strlen(p); + + if ((b->options = malloc(l + 1)) == NULL) + goto nomem; + + strncpy(b->options, p, l); + b->options[l] = '\0'; + + if (b->version) + break; + else + continue; + } + + if (!strncmp(line, "version ", 8)) { + p = line + 8; + + b->version = (int)strtoul(p, NULL, 10); + + if (b->options) + break; + else + continue; + } + } + + if (!b->version) + goto missing_version; + + if (b->options == NULL) + goto missing_options; + + if ((p = strstr(b->options, "ostree=")) == NULL) + goto missing_ostree; + + p += 7; + + if ((e = strchr(p, ' ')) == NULL) + l = strlen(p); + else + l = e - p; + + snprintf(path, sizeof(path), "%*.*s", l, l, p); + + if ((b->boot = strdup(path)) == NULL) + goto nomem; + + return 0; + + missing_version: + log_error("missing config entry 'version'"); + return -1; + + missing_options: + log_error("missing config entry 'options'"); + return -1; + + missing_ostree: + log_error("missing ostree-entry in 'options'"); + return -1; + + nomem: + return -1; +} + + +static int resolve_boot_path(boot_entry_t *b) +{ + struct stat st; + char path[PATH_MAX], pwd[PATH_MAX], *p; + + if (stat(p = b->boot, &st) < 0 && errno == ENOENT) { + snprintf(path, sizeof(path), "/rootfs/%s", b->boot); + + if (stat(p = path, &st) < 0) + goto invalid_path; + } + + if (getcwd(pwd, sizeof(pwd)) == NULL) + goto resolve_failed; + + if (chdir(p) < 0) + goto resolve_failed; + + if (getcwd(path, sizeof(path)) == NULL) + goto resolve_failed; + + chdir(pwd); + + if (!strncmp(path, "/rootfs/", 8)) + p = path + 7; + else + p = path; + + if (!strncmp(path, "/sysroot/", 9)) + p += 8; + + b->deployment = strdup(p); + + if (b->deployment == NULL) + goto nomem; + + if (stat(path, &st) < 0) + goto resolve_failed; + + b->device = st.st_dev; + b->inode = st.st_ino; + + return 0; + + invalid_path: + log_error("failed to resolve boot path '%s'", p); + return -1; + + resolve_failed: + log_error("failed to resolve boot symlink '%s' to deployment", p); + nomem: + return -1; +} + + +static int get_boot_entries(context_t *c) +{ + boot_entry_t *buf = c->entries; + size_t size = sizeof(c->entries) / sizeof(c->entries[0]); + struct stat root; + char conf[PATH_MAX], *base; + boot_entry_t *b; + int latest, i, status; + FILE *fp = NULL; + + if (c->nentry > 0) + return c->nentry; + + if (access(base = "/boot/loader/entries", X_OK) < 0) { + if (errno == ENOENT) { + if (access(base = "/rootfs/boot/loader/entries", X_OK) < 0) { + if (errno == ENOENT) + goto no_entries; + + goto get_failed; + } + } + else + goto get_failed; + } + + if (stat("/", &root) < 0) + memset(&root, 0, sizeof(root)); + + c->latest = c->running = latest = -1; + + for (i = 0, b = buf; i < 2; 
i++, b++) { + if (i >= (int)size) + goto no_buf; + + memset(b, 0, sizeof(*b)); + b->id = i; + + snprintf(conf, sizeof(conf), "%s/ostree-%s-%d.conf", base, c->distro, i); + + if ((fp = fopen(conf, "r")) == NULL) { + if (i == 0) + goto get_failed; + else + break; + } + + log_debug("parsing config file '%s'...", conf); + status = parse_boot_entry(fp, b); + + fclose(fp); + fp = NULL; + + if (status < 0) + goto invalid_entry; + + if (resolve_boot_path(b) < 0) + goto invalid_entry; + + if (b->version > latest) { + c->latest = i; + latest = b->version; + } + + if (b->device == root.st_dev && b->inode == root.st_ino) + c->running = i; + } + + if (i < (int)size - 1) + memset(buf + i, 0, sizeof(*buf)); + + return (c->nentry = i); + + no_entries: + get_failed: + log_error("failed to find any boot loader entries"); + if (fp) + fclose(fp); + return -1; + + invalid_entry: + log_error("invalid entry, failed to parse '%s'", conf); + + no_buf: + errno = ENOBUFS; + return -1; +} + + +static void parse_cmdline(context_t *c, int argc, char **argv) +{ +#ifdef __REFKIT_UPDATER__ +# define UPDATER_OPTIONS "FAOi:P:R" +# define UPDATER_ENTRIES \ + { "fetch-only" , no_argument , NULL, 'F' }, \ + { "apply-only" , no_argument , NULL, 'A' }, \ + { "one-shot" , no_argument , NULL, 'O' }, \ + { "check-interval" , required_argument, NULL, 'i' }, \ + { "post-apply-hook", required_argument, NULL, 'P' } +#else +# define UPDATER_OPTIONS "" +# define UPDATER_ENTRIES { NULL, 0, NULL, 0 } +#endif + +# define OPTIONS "-brLpIsSV:l:vd::h"UPDATER_OPTIONS + static struct option options[] = { + { "boot-entries" , no_argument , NULL, 'b' }, + { "running-entry" , no_argument , NULL, 'r' }, + { "latest-entry" , no_argument , NULL, 'L' }, + { "patch-procfs" , no_argument , NULL, 'p' }, + { "prepare-root" , no_argument , NULL, 'I' }, + { "shell" , no_argument , NULL, 's' }, + { "shell-export" , no_argument , NULL, 'S' }, + { "prefix" , required_argument, NULL, 'V' }, + { "log" , required_argument, NULL, 'l' }, + { "verbose" , no_argument , NULL, 'v' }, + { "debug" , optional_argument, NULL, 'd' }, + { "help" , no_argument , NULL, 'h' }, + UPDATER_ENTRIES , + { NULL, 0, NULL, 0 } + }; + static char *domains[32] = { [0 ... 
31] = NULL }; + int ndomain = 0; + + int opt, vmask, lmask; +#ifdef __REFKIT_UPDATER__ + char *e; +#endif + + set_defaults(c, argv[0]); + lmask = 0; + vmask = log_mask; + + while ((opt = getopt_long(argc, argv, OPTIONS, options, NULL)) != -1) { + switch (opt) { + case 'b': + set_mode(c, UPDATER_MODE_ENTRIES); + break; + + case 'r': + set_mode(c, UPDATER_MODE_RUNNING); + break; + + case 'L': + set_mode(c, UPDATER_MODE_LATEST); + break; + + case 'p': + set_mode(c, UPDATER_MODE_PATCH); + break; + + case 'I': + set_mode(c, UPDATER_MODE_PREPARE); + break; + + case 's': + c->print = PRINT_SHELL_EVAL; + break; + + case 'S': + c->print = PRINT_SHELL_EXPORT; + break; + + case 'V': + c->prefix = optarg; + break; + +#ifdef __REFKIT_UPDATER__ + case 'F': + set_mode(c, UPDATER_MODE_FETCH); + break; + + case 'A': + set_mode(c, UPDATER_MODE_APPLY); + break; + + case 'O': + c->oneshot = 1; + break; + + case 'i': + c->interval = strtol(optarg, &e, 10); + if (e && *e) + log_fatal("invalid update check interval '%s'", optarg); + break; + + case 'P': + c->hook_apply = optarg; + break; +#endif + + case 'l': + lmask = parse_log_levels(optarg); + break; + + case 'v': + vmask <<= 1; + vmask |= 1; + break; + + case 'd': + if (optarg == NULL || (optarg[0] == '*' && optarg[1] == '\0')) + optarg = "all"; + + if (ndomain < (int)(sizeof(domains) / sizeof(domains[0])) - 1) + domains[ndomain++] = optarg; + else + log_warn("too many debug domains, ignoring '%s'...", optarg); + break; + + case 'h': + print_usage(argv[0], 0, ""); + + case '?': + print_usage(argv[0], EINVAL, "invalid option"); + break; + } + } +#undef OPTIONS + + if (!c->mode) + c->mode = UPDATER_MODE_UPDATE; + + if (vmask && lmask) + log_warn("both -v and -l options used to change logging level..."); + + log_mask = vmask | lmask | UPDATER_LOG_FATAL; + + if (ndomain > 0) + enable_debug_domains(domains); +} + + +#ifdef __REFKIT_UPDATER__ +static void updater_init(context_t *c, const char *argv0) +{ + GCancellable *gcnc = NULL; + GError *gerr = NULL; + + g_set_prgname(argv0); + g_setenv("GIO_USE_VFS", "local", TRUE); + g_log_set_handler(G_LOG_DOMAIN, G_LOG_LEVEL_MESSAGE, log_handler, NULL); + + c->repo = ostree_repo_new_default(); + + if (!ostree_repo_open(c->repo, gcnc, &gerr)) + log_fatal("failed to open OSTree repository (%s)", gerr->message); +} + + +static pid_t updater_invoke(char **argv, redirfd_t *rfd) +{ + pid_t pid; + redirfd_t *r; + int i, fd; + + switch ((pid = fork())) { + case -1: + log_error("failed to fork to exec '%s'", argv[0]); + return -1; + + case 0: + /* + * child + * - close file descriptors skip the ones we will be dup2'ing + * - do filedescriptor redirections + * - exec + */ + + for (i = 0; i < sysconf(_SC_OPEN_MAX); i++) { + fd = i; + + if (fd == fileno(stdout) && (log_mask & UPDATER_LOG_DEBUG)) + continue; + + if (rfd != NULL) { + for (r = rfd; r->parent >= 0 && fd >= 0; r++) + if (r->parent == i) + fd = -1; + } + + if (fd >= 0) + close(fd); + } + + if (rfd != NULL) { + for (r = rfd; r->parent >= 0; r++) { + if (rfd->parent == rfd->child) + continue; + + log_debug("redirecting child fd %d -> %d", r->child, r->parent); + + dup2(r->parent, r->child); + close(r->parent); + } + } + + if (execv(argv[0], argv) < 0) { + log_error("failed to exec '%s' (%d: %s)", argv[0], + errno, strerror(errno)); + exit(-1); + } + break; + + default: + /* + * parent + * - close file descriptor we'll be using on the child side + */ + + if (rfd != NULL) { + for (r = rfd; r->parent >= 0; r++) { + log_debug("closing parent fd %d", r->parent); + 
close(r->parent); + } + } + + break; + } + + return pid; +} + + +static int updater_block_shutdown(context_t *c) +{ +# define RD 0 +# define WR 1 + + char *argv[16], *path; + int argc, pipefds[2]; + redirfd_t rfd[2]; + + if (c->inhibit_pid > 0) + return 0; + + if (access((path = "/usr/bin/systemd-inhibit"), X_OK) != 0) + if (access((path = "/bin/systemd-inhibit"), X_OK) != 0) + goto no_inhibit; + + log_debug("using %s to block system shutdown/reboot...", path); + + /* + * systemd-inhibit --what=shutdown --who=ostree-updater \ + * --why='pulling/applying system update' --mode=block \ + * /bin/sh -c "read foo; exit 0" + */ + + argc = 0; + argv[argc++] = path; + argv[argc++] = "--what=shutdown"; + argv[argc++] = "--who=ostree-update"; + argv[argc++] = "--why=pulling/applying system update"; + argv[argc++] = "--mode=block"; + argv[argc++] = "/bin/sh"; + argv[argc++] = "-c"; + argv[argc++] = "read foo"; + argv[argc++] = NULL; + + if (pipe(pipefds) < 0) + goto pipe_err; + + rfd[0].parent = pipefds[RD]; + rfd[0].child = fileno(stdin); + rfd[1].parent = rfd[1].child = -1; + c->inhibit_fd = pipefds[WR]; + + log_info("activating shutdown-inhibitor..."); + + c->inhibit_pid = updater_invoke(argv, rfd); + + if (c->inhibit_pid < 0) { + close(pipefds[WR]); + c->inhibit_fd = -1; + + return -1; + } + + return 0; + + no_inhibit: + log_error("failed to find an executable systemd-inhibit"); + return -1; + + pipe_err: + log_error("failed to create pipe for systemd-inhibit"); + return -1; + +#undef RD +#undef WR +} + + +static void updater_allow_shutdown(context_t *c) +{ + pid_t pid; + int cnt, ec; + + if (!c->inhibit_pid && c->inhibit_fd < 0) { + c->inhibit_pid = 0; + c->inhibit_fd = -1; + + return; + } + + log_info("deactivating shutdown-inhibitor..."); + + close(c->inhibit_fd); + c->inhibit_fd = -1; + + usleep(10 * 1000); + + cnt = 0; + while ((pid = waitpid(c->inhibit_pid, &ec, WNOHANG)) != c->inhibit_pid) { + if (cnt++ < 5) + usleep(250 * 1000); + else + break; + } + + if (pid <= 0) { + log_warn("Hmm... 
hammering inhibitor child (%u)...", c->inhibit_pid); + kill(c->inhibit_pid, SIGKILL); + } + + c->inhibit_pid = 0; + c->inhibit_fd = -1; +} + + +static int updater_prepare(context_t *c) +{ + GCancellable *gcnc = NULL; + GError *gerr = NULL; + gboolean locked = FALSE; + + if (c->sysroot == NULL) + c->sysroot = ostree_sysroot_new(NULL); + + if (!ostree_sysroot_load(c->sysroot, gcnc, &gerr)) + goto load_failure; + + if (!ostree_sysroot_try_lock(c->sysroot, &locked, &gerr)) + goto lock_failure; + + if (!locked) + return 0; + + if (updater_block_shutdown(c) < 0) + goto block_failure; + + c->u = ostree_sysroot_upgrader_new_for_os(c->sysroot, NULL, gcnc, &gerr); + + if (c->u == NULL) + goto no_upgrader; + + return 1; + + load_failure: + log_error("failed to load OSTree sysroot (%s)", gerr->message); + return -1; + + lock_failure: + log_error("failed to lock OSTree sysroot (%s)", gerr->message); + return -1; + + block_failure: + log_error("failed to block shutdown"); + return -1; + + no_upgrader: + log_error("failed to create OSTree upgrader (%s)", gerr->message); + updater_allow_shutdown(c); + return -1; +} + + +static void updater_cleanup(context_t *c) +{ + if (c->sysroot) + ostree_sysroot_unlock(c->sysroot); + + if (c->u) { + g_object_unref(c->u); + c->u = NULL; + } + + updater_allow_shutdown(c); +} + + +static int updater_post_apply_hook(context_t *c, const char *o, const char *n) +{ +# define TIMEOUT 60 + + char *argv[8]; + int argc, cnt; + redirfd_t rfd[3]; + pid_t pid, ec, status; + + if (!*c->hook_apply) + goto no_hook; + + if (access(c->hook_apply, X_OK) < 0) + goto no_access; + + argc = 0; + argv[argc++] = (char *)c->hook_apply; + if (o != NULL && n != NULL) { + argv[argc++] = (char *)o; + argv[argc++] = (char *)n; + } + argv[argc] = NULL; + + rfd[0].parent = rfd[0].child = fileno(stdout); + rfd[1].parent = rfd[1].child = fileno(stderr); + rfd[2].parent = rfd[2].child = -1; + + pid = updater_invoke(argv, rfd); + + if (pid <= 0) + return -1; + + log_info("waiting for post-apply hook (%s) to finish...", c->hook_apply); + + cnt = 0; + while ((status = waitpid(pid, &ec, WNOHANG)) != pid) { + if (cnt++ < TIMEOUT) + sleep(1); + else + break; + } + + if (status != pid) + goto timeout; + + if (!WIFEXITED(ec)) + goto hook_error; + + if (WEXITSTATUS(ec) != 0) + goto hook_failure; + + log_info("post-apply hook (%s) succeeded", c->hook_apply); + return 0; + + no_hook: + return 0; + + no_access: + log_error("can't execute post-apply hook '%s'", c->hook_apply); + return -1; + + timeout: + log_error("post-apply hook (%s) didn't finish in %d seconds", + c->hook_apply, TIMEOUT); + return -1; + + hook_error: + log_error("post-apply hook (%s) exited abnormally", c->hook_apply); + return -1; + + hook_failure: + log_error("post-apply hook (%s) failed with status %d", c->hook_apply, + WEXITSTATUS(ec)); + return -1; + +# undef TIMEOUT +} + + +static int updater_fetch(context_t *c) +{ + GCancellable *gcnc = NULL; + GError *gerr = NULL; + int flg = 0; + int changed; + const char *src; + + if (!(c->mode & UPDATER_MODE_FETCH)) { + flg = OSTREE_SYSROOT_UPGRADER_PULL_FLAGS_SYNTHETIC; + src = "local repository"; + } + else + src = "server"; + + log_info("polling OSTree %s for available updates...", src); + + if (!ostree_sysroot_upgrader_pull(c->u, 0, flg, NULL, &changed, gcnc, &gerr)) + goto pull_failed; + + if (!changed) + log_info("no updates pending"); + else + log_info("updates fetched successfully"); + + return changed; + + pull_failed: + log_error("failed to poll %s for updates (%s)", src, gerr->message); + if 
(!(c->mode & UPDATER_MODE_APPLY)) /* mimick stock ostree logic */ + ostree_sysroot_cleanup(c->sysroot, NULL, NULL); + return -1; +} + + +static int updater_apply(context_t *c) +{ + GCancellable *gcnc = NULL; + GError *gerr = NULL; + const char *prev = NULL; + const char *curr = NULL; + + if (!(c->mode & UPDATER_MODE_APPLY)) + return 0; + + if (!ostree_sysroot_upgrader_deploy(c->u, gcnc, &gerr)) + goto deploy_failure; + + log_info("OSTree updates applied"); + + if (get_boot_entries(c) < 0 || c->latest < 0) + goto entry_failure; + + if (c->running >= 0) + prev = c->entries[c->running].deployment; + else + prev = ""; + + curr = c->entries[c->latest].deployment; + + log_info("updated from %s to %s", *prev ? prev : "unknown", curr); + + if (updater_post_apply_hook(c, prev, curr) < 0) + goto hook_failure; + + return 1; + + deploy_failure: + log_error("failed to deploy OSTree updates locally (%s)", gerr->message); + return -1; + + entry_failure: + log_error("failed to determine post-update boot entries"); + return -1; + + hook_failure: + log_error("update post-apply hook failed"); + return -1; +} + + +static int updater_run(context_t *c) +{ + int status; + + if (updater_prepare(c) <= 0) + return -1; + + if ((status = updater_fetch(c)) > 0) + status = updater_apply(c); + + updater_cleanup(c); + + return status; +} + + +static void updater_loop(context_t *c) +{ + int updates; + + /* + * Notes: + * + * This is extremely simplistic now. Since ostree uses heavily + * gobjects/GMainLoop we could easily/perhaps should switch + * to using GMainLoop. + */ + + for (;;) { + updates = updater_run(c); + + if (c->oneshot) + break; + + switch (updates) { + case 0: /* no updates available */ + sleep(c->interval); + break; + + case 1: /* updates fetched and applied, we're done until a reboot */ + exit(0); + + default: + sleep(30); + break; + } + } +} + + + +static void updater_exit(context_t *c) +{ + UNUSED_VAR(c); +} + +#endif /* __REFKIT_UPDATER__ */ + + +static void print_entries(context_t *c) +{ + boot_entry_t *e; + int i; + const char *exp; + + if (get_boot_entries(c) < 0) + exit(1); + + exp = (c->print == PRINT_SHELL_EXPORT ? "export " : ""); + + if (c->print != PRINT_HUMAN_READABLE) { + printf("%s%s_BOOT_ENTRIES=%d\n", exp, c->prefix, c->nentry); + printf("%s%s_RUNNING_ENTRY=%d\n", exp, c->prefix, c->running); + printf("%s%s_LATEST_ENTRY=%d\n", exp, c->prefix, c->latest); + } + + for (i = 0, e = c->entries; i < c->nentry; i++, e++) { + switch (c->print) { + case PRINT_HUMAN_READABLE: + default: + printf("boot entry #%d:\n", i); + printf(" id: %d\n", e->id); + printf(" version: %d\n", e->version); + printf(" options: '%s'\n", e->options); + printf(" boot: '%s'\n", e->boot); + printf(" deployment: '%s'\n", e->deployment); + printf(" dev/ino: 0x%lx/0x%lx\n", e->device, e->inode); + break; + + case PRINT_SHELL_EVAL: + case PRINT_SHELL_EXPORT: + printf("%s%s_BOOT%d_VERSION=%d\n", exp, c->prefix, i, e->version); + printf("%s%s_BOOT%d_OPTIONS='%s'\n", exp, c->prefix, i, e->options); + printf("%s%s_BOOT%d_PATH='%s'\n", exp, c->prefix, i, e->deployment); + printf("%s%s_BOOT%d_DEVICE=0x%lx\n", exp, c->prefix, i, e->device); + printf("%s%s_BOOT%d_INODE=%lu\n", exp, c->prefix, i, e->inode); + break; + } + } + + exit(0); +} + + +static void print_running(context_t *c) +{ + boot_entry_t *e; + const char *exp; + + if (get_boot_entries(c) < 0) + exit(1); + + if (c->running < 0) + exit(1); + + e = c->entries + c->running; + exp = (c->print == PRINT_SHELL_EXPORT ? 
"export " : ""); + + switch (c->print) { + case PRINT_HUMAN_READABLE: + default: + printf("running entry #%d:\n", c->running); + printf(" id: %d\n", e->id); + printf(" version: %d\n", e->version); + printf(" options: '%s'\n", e->options); + printf(" boot: '%s'\n", e->boot); + printf(" deployment: '%s'\n", e->deployment); + printf(" dev/ino: 0x%lx/0x%lx\n", e->device, e->inode); + break; + + case PRINT_SHELL_EVAL: + case PRINT_SHELL_EXPORT: + printf("%s%s_BOOTED_VERSION=%d\n", exp, c->prefix, e->version); + printf("%s%s_BOOTED_OPTIONS='%s'\n", exp, c->prefix, e->options); + printf("%s%s_BOOTED_PATH='%s'\n", exp, c->prefix, e->deployment); + printf("%s%s_BOOTED_DEVICE=0x%lx\n", exp, c->prefix, e->device); + printf("%s%s_BOOTED_INODE=%lu\n", exp, c->prefix, e->inode); + break; + } + + exit(0); +} + + +static void print_latest(context_t *c) +{ + boot_entry_t *e; + const char *exp; + + if (get_boot_entries(c) < 0) + exit(1); + + if (c->latest < 0) + exit(1); + + e = c->entries + c->latest; + exp = (c->print == PRINT_SHELL_EXPORT ? "export " : ""); + + switch (c->print) { + case PRINT_HUMAN_READABLE: + default: + printf("latest entry #%d:\n", c->running); + printf(" id: %d\n", e->id); + printf(" version: %d\n", e->version); + printf(" options: '%s'\n", e->options); + printf(" boot: '%s'\n", e->boot); + printf(" deployment: '%s'\n", e->deployment); + printf(" dev/ino: 0x%lx/0x%lx\n", e->device, e->inode); + break; + + case PRINT_SHELL_EVAL: + case PRINT_SHELL_EXPORT: + printf("%s%s_LATEST_VERSION=%d\n", exp, c->prefix, e->version); + printf("%s%s_LATEST_OPTIONS='%s'\n", exp, c->prefix, e->options); + printf("%s%s_LATEST_PATH='%s'\n", exp, c->prefix, e->deployment); + printf("%s%s_LATEST_DEVICE=0x%lx\n", exp, c->prefix, e->device); + printf("%s%s_LATEST_INODE=%lu\n", exp, c->prefix, e->inode); + break; + } + + exit(0); +} + + +const char *full_path(char *buf, path_t *path) +{ + int n; + + n = snprintf(buf, PATH_MAX, "%s%s%s%s%s", + path->prefix ? path->prefix : "", + path->root && *path->root != '/' ? "/" : "", + path->root ? path->root : "", + path->path && *path->path != '/' ? "/" : "", + path->path ? 
path->path : ""); + + if (n < 0 || n >= PATH_MAX) + return ""; + + return buf; +} + + +static int bind_mount(path_t *s, path_t *d) +{ + const char *src, *dst; + char srcbuf[PATH_MAX], dstbuf[PATH_MAX]; + + src = full_path(srcbuf, s); + dst = full_path(dstbuf, d); + + log_info("bind-mounting %s to %s", src, dst); + + return mount(src, dst, NULL, MS_BIND, NULL); +} + + +static int move_mount(path_t *s, path_t *d) +{ + const char *src, *dst; + char srcbuf[PATH_MAX], dstbuf[PATH_MAX]; + + src = full_path(srcbuf, s); + dst = full_path(dstbuf, d); + + log_info("move-mounting %s to %s", src, dst); + + return mount(src, dst, NULL, MS_MOVE, NULL); +} + + +static int make_movable(const char *root, const char *dir) +{ + path_t path = { NULL, root, dir }; + + return bind_mount(&path, &path); +} + + +static int prepare_root(const char *root) +{ + struct { + path_t src; + path_t dst; + } mounts[] = { + { { "/rootfs", root , "../../var" }, { "/rootfs", root, "var" } }, + { { "/rootfs", "boot", NULL }, { "/rootfs", root, "boot" } }, + { { "/rootfs", "home", NULL }, { "/rootfs", root, "home" } }, + { { NULL, NULL, NULL }, { NULL, NULL, NULL } }, + }, *m; + + for (m = mounts; m->src.prefix || m->src.root || m->src.path; m++) + if (bind_mount(&m->src, &m->dst) < 0) + return -1; + + return 0; +} + + +static int shuffle_root(const char *root) +{ + struct { + path_t src; + path_t dst; + } mounts[] = { + { { "/rootfs" , root, NULL }, { "/sysroot.tmp", NULL, NULL } }, + { { "/rootfs" , NULL, NULL }, { "/sysroot.tmp", "sysroot", NULL } }, + { { "/sysroot.tmp", NULL, NULL }, { "/rootfs" , NULL, NULL } }, + { { NULL, NULL, NULL }, { NULL, NULL, NULL } }, + }, *m; + + if (mkdir("/sysroot.tmp", 0755) < 0 && errno != EEXIST) + return -1; + + for (m = mounts; m->src.prefix || m->src.root || m->src.path; m++) + if (move_mount(&m->src, &m->dst) < 0) + return -1; + + return 0; +} + + +static void initramfs_prepare(context_t *c) +{ + boot_entry_t *boot; + + if (get_boot_entries(c) < 0) + log_fatal("failed to determine boot entries"); + + if (c->latest < 0 || c->latest >= c->nentry) + log_fatal("failed to discover latest boot entry"); + + boot = c->entries + c->latest; + + if (make_movable("/rootfs", boot->deployment) < 0) + log_fatal("failed to make '/rootfs/%s' movable (%d: %s)", + boot->deployment, errno, strerror(errno)); + + if (prepare_root(boot->deployment) < 0) + log_fatal("failed to prepare ostree root '%s' (%d: %s)", + boot->deployment, errno, strerror(errno)); + + if (shuffle_root(boot->deployment) < 0) + log_fatal("failed to shuffle ostree root '%s' (%d: %s)", + boot->deployment, errno, strerror(errno)); +} + + +static void patch_procfs(context_t *c) +{ + boot_entry_t *boot; + char cmdline[4096], *p; + const char *orig, *patched; + int n, l, cnt, fd, nl; + + if (get_boot_entries(c) < 0) + exit(1); + + if (c->running < 0) + exit(1); + + boot = c->entries + c->running; + + if ((fd = open(orig = "/proc/cmdline", O_RDONLY)) < 0) + exit(1); + + if ((n = read(fd, cmdline, sizeof(cmdline))) < 0) + exit(1); + + close(fd); + + if (n >= (int)sizeof(cmdline) - 1) + exit(1); + + nl = 0; + while (n > 0 && cmdline[n - 1] == '\n') { + n--; + nl = 1; + } + + cmdline[n] = '\0'; + + if (strstr(cmdline, " ostree=") || strstr(cmdline, "ostree=") == cmdline) + exit(0); + + l = sizeof(cmdline) - n - 1; + + p = cmdline + n; + cnt = snprintf(p, l, " ostree=%s%s", boot->boot, nl ? 
"\n" : ""); + + if (cnt < 0 || cnt > l) + exit(1); + + n += cnt; + + fd = open(patched = "/run/cmdline.patched", + O_CREAT | O_TRUNC | O_WRONLY, 0644); + + if (fd < 0) + exit(1); + + p = cmdline; + l = n; + while (l > 0) { + n = write(fd, p, l); + + if (n < 0 && !(errno == EINTR || errno == EAGAIN)) + exit(1); + + p += n; + l -= n; + } + + close(fd); + + if (mount(patched, orig, NULL, MS_BIND|MS_RDONLY, NULL) < 0) + exit(1); + + unlink(patched); + exit(0); +} + + +int main(int argc, char *argv[]) +{ + context_t c; + + setlocale(LC_ALL, ""); + + parse_cmdline(&c, argc, argv); + + switch (c.mode) { + case UPDATER_MODE_ENTRIES: + print_entries(&c); + break; + + case UPDATER_MODE_RUNNING: + print_running(&c); + break; + + case UPDATER_MODE_LATEST: + print_latest(&c); + break; + + case UPDATER_MODE_PATCH: + patch_procfs(&c); + break; + + case UPDATER_MODE_PREPARE: + initramfs_prepare(&c); + break; + +#ifdef __REFKIT_UPDATER__ + case UPDATER_MODE_FETCH: + case UPDATER_MODE_APPLY: + case UPDATER_MODE_UPDATE: + updater_init(&c, argv[0]); + updater_loop(&c); + updater_exit(&c); + break; +#endif + + default: + exit(-1); + } + + return 0; +} + diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree.bb b/meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree.bb new file mode 100644 index 0000000000..7ef428a937 --- /dev/null +++ b/meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree.bb @@ -0,0 +1,42 @@ +SUMMARY = "IoT RefKit ostree helper, scripts, services, et al." + +LICENSE = "BSD-3-Clause" +LIC_FILES_CHKSUM = "file://LICENSE-BSD;md5=f9f435c1bd3a753365e799edf375fc42" + +SRC_URI = " \ + file://${PN} \ +" + +DEPENDS = "ostree" + +inherit autotools pkgconfig systemd distro_features_check + +REQUIRED_DISTRO_FEATURES = "ostree systemd" + +S = "${WORKDIR}/${PN}" + +PACKAGES += "${PN}-initramfs" + +FILES_${PN}-initramfs = " \ + ${bindir}/refkit-ostree \ +" + +FILES_${PN} = " \ + ${bindir}/refkit-ostree-update \ + ${systemd_unitdir}/system/* \ + ${datadir}/${PN} \ +" + +# Our systemd services. +SYSTEMD_SERVICE_${PN} = " \ + refkit-patch-ostree-param.service \ + refkit-update.service \ + refkit-reboot.service \ + refkit-update-post-check.service \ +" + +EXTRA_OECONF += " \ + --with-systemdunitdir=${systemd_unitdir}/system \ +" + +RDEPENDS_${PN} += "ostree" diff --git a/meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree_git.bb b/meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree_git.bb deleted file mode 100644 index 834b649de2..0000000000 --- a/meta-refkit-core/recipes-ostree/refkit-ostree/refkit-ostree_git.bb +++ /dev/null @@ -1,36 +0,0 @@ -SUMMARY = "OSTree helper/wrapper scripts et al. for IoT RefKit." - -LICENSE = "BSD-3-Clause" -LIC_FILES_CHKSUM = "file://LICENSE-BSD;md5=f9f435c1bd3a753365e799edf375fc42" - -# TODO: replace with more recent implementation in C, -# move code into intel-iot-refkit? -SRC_URI = " \ - git://git@github.com/klihub/refkit-ostree-upgrade.git;protocol=http;branch=master \ -" - -SRCREV = "a196e93ed90b65f21e496aa566d17b06484fcc45" - -inherit autotools systemd distro_features_check - -REQUIRED_DISTRO_FEATURES = "ostree systemd" - -S = "${WORKDIR}/git" - -FILES_${PN} = " \ - ${bindir}/refkit-ostree \ - ${systemd_unitdir}/system/* \ -" - -# We want the following services enabled. 
-SYSTEMD_SERVICE_${PN} = " \ - ostree-patch-proc-cmdline.service \ - ostree-update.service \ - ostree-post-update.service \ -" - -EXTRA_OECONF += " \ - --with-systemdunitdir=${systemd_unitdir} \ -" - -RDEPENDS_${PN} += "ostree" From 287685dfe089811d241e15123f40f7b2e0e49bc4 Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Sun, 9 Jul 2017 17:30:37 +0300 Subject: [PATCH 08/21] initramfs-framework-ostree, refkit-initramfs: switch to C-based implementation. Switch to using the C-based implementation of refkit-ostree in the refkit ostree initramfs module for parsing the boot loader config files and picking the latest ostree deployment to boot into. Split out refkit-ostree (which is a subset of refkit-ostree-update that does not depend on libostree) into a (refkit-ostree-)initramfs subpackage Signed-off-by: Krisztian Litkey --- .../recipes-images/images/refkit-initramfs.bb | 2 +- .../initramfs-framework-ostree/files/ostree | 147 ++++-------------- 2 files changed, 27 insertions(+), 122 deletions(-) diff --git a/meta-refkit-core/recipes-images/images/refkit-initramfs.bb b/meta-refkit-core/recipes-images/images/refkit-initramfs.bb index a83b601590..dcc979d6f9 100644 --- a/meta-refkit-core/recipes-images/images/refkit-initramfs.bb +++ b/meta-refkit-core/recipes-images/images/refkit-initramfs.bb @@ -55,7 +55,7 @@ FEATURE_PACKAGES_debug = "initramfs-module-debug" IMAGE_FEATURES += " \ ${@bb.utils.contains('DISTRO_FEATURES', 'ostree', 'ostree', '', d)} \ " -FEATURE_PACKAGES_ostree = "initramfs-framework-ostree" +FEATURE_PACKAGES_ostree = "initramfs-framework-ostree refkit-ostree-initramfs" IMAGE_LINGUAS = "" diff --git a/meta-refkit-core/recipes-ostree/initramfs-framework-ostree/files/ostree b/meta-refkit-core/recipes-ostree/initramfs-framework-ostree/files/ostree index 81c6103473..1ef5e7068a 100644 --- a/meta-refkit-core/recipes-ostree/initramfs-framework-ostree/files/ostree +++ b/meta-refkit-core/recipes-ostree/initramfs-framework-ostree/files/ostree @@ -10,82 +10,21 @@ ostree_enabled () { fi } +# Prepare the latest ostree deployment for pivot-rooting it. +ostree_prepare_root () { + local _root -# Print a fatal error message, starting a shell if we're in development mode. -ostree_fatal () { - fatal "ostree: $*" -} - -# Print an info message. -ostree_info () { - msg "ostree: $*" -} - -# Print a debug message. -ostree_debug () { - debug "ostree: $*" -} - -# Discover BootLoaderSpec-compliant boot loader entries. -ostree_loader_entries () { - local _boot=${1:-/boot} - local _evar=${2:-ENTRY} - local _ovar=${3:-PATH} - local _ecurr=""; _eprev="" - local _ocurr=""; _oprev="" - local _vcurr=0; _vprev=0 - local _e _v _o - - # Discover all loader entries $1/loader/entries/ostree-*.conf. - # Parse the found ones for version and kernel command-line then - # pick the two most recent ones. - for _e in $_boot/loader/entries/ostree-*.conf; do - _v=$(grep '^version ' $_e) - _v=${_v#version } - _o=$(grep '^options ' $_e) - _o=${_o#options } - - if [ $_v -gt $_vcurr ]; then - _vprev=$_vcurr - _eprev=$_ecurr - _oprev=$_ocurr - _vcurr=$_v - _ecurr=$_e - _ocurr=$_o - elif [ $_v -gt $_vprev ]; then - _vprev=$_v - _eprev=$_e - _oprev=$_o - fi - done - - if [ -z "$_ecurr" ]; then + if ! 
eval $(/usr/bin/refkit-ostree --latest-entry --shell); then + msg "ERROR: failed to determine latest ostree deployment" return 1 fi - eval "${_evar}_CURR=\"$_ecurr\"" - eval "${_evar}_PREV=\"$_eprev\"" - eval "${_ovar}_CURR=\"${_ocurr#ostree=}\"" - eval "${_ovar}_PREV=\"${_oprev#ostree=}\"" + # here's our deployment root + _root="/rootfs/$REFKIT_OSTREE_LATEST_PATH" - return 0 -} - -# Make the given directories/mount points movable. -ostree_make_movable () { - for _d in $*; do - ostree_info "making $_d movable..." - mount --bind $_d $_d || return 1 - done - - return 0 -} + # make the deployment root mount-moveable + mount --bind $_root $_root || return 1 -# Fill in the missing details in an ostree deployment root. -ostree_prepare_root () { - local _root=$1 - - # # We need to prepare the ostree deployment root to be kosher for # pivot-rooting and thus eventually becoming the final root. # This involves stitching it together from several pieces to fill @@ -95,23 +34,12 @@ ostree_prepare_root () { # - bind-mount the common /boot into the deployment # - bind-mount the common /home into the deployment - ostree_info "preparing $_root for pivot-rooting..." - - cd $_root - - mount --bind ../../var var || return 1 - mount --bind /rootfs/boot $_root/boot || return 1 - mount --bind /rootfs/home $_root/home || return 1 - - cd - > /dev/null - - return 0 -} - -# Shuffle the ostree deployment around so that we eventually pivot-root it. -ostree_shuffle_root () { - local _root="$1" + mount --bind $_root/../../var $_root/var && \ + mount --bind /rootfs/boot $_root/boot && \ + mount --bind /rootfs/home $_root/home || \ + return 1 + # Finally shuffle /rootfs to $_root/sysroot and $_root to /rootfs. # # This code mimicks the last bits of ostree-prepare-root. It shuffles # /sysroot, /rootfs, and the chosen ostree deployment root ($_root) around @@ -125,46 +53,23 @@ ostree_shuffle_root () { # do these operations with an extra step to avoid trying to move the # eventual /sysroot (now /rootfs) under itself. - ostree_info "shuffling /sysroot, /rootfs and $_root for pivot-rooting..." - - cd $_root - - # make /rootfs $_root/sysroot and $_root /rootfs - mkdir -p /sysroot.tmp || return 1 - mount --move $_root /sysroot.tmp || return 1 - mount --move /rootfs sysroot || return 1 - mount --move . /rootfs || return 1 - - cd - > /dev/null + mkdir -p /sysroot.tmp && \ + mount --move $_root /sysroot.tmp && \ + mount --move /rootfs /sysroot.tmp/sysroot && \ + mount --move /sysroot.tmp /rootfs || \ + return 1 return 0 } -# Prepare the initramfs environment for pivot-rooting into an ostree deployment. -ostree_initramfs_prepare_root () { - local _entry_CURR _entry_PREV _path_CURR _path_PREV - - ostree_info "* ostree: preparing ostree rootfs for booting..." - - # Discover, and parse loader entries. - if ! ostree_loader_entries /rootfs/boot _entry _path; then - fatal_msg "failed to discover loader entries" - fi - - # Pick the ostree deployment root from the latest entry. - OSTREE_ROOT="$_path_CURR" - ostree_info "active ostree entry: $OSTREE_ROOT" - - # Prepare the deployment root for becoming the final runtime root, then - # shuffle /rootfs, /sysroot and the deployment root around so that we - # finally end up pivot-rooting into the deployment with /rootfs ending - # up as /sysroot under it. - ostree_make_movable /rootfs$OSTREE_ROOT - ostree_prepare_root /rootfs$OSTREE_ROOT - ostree_shuffle_root /rootfs$OSTREE_ROOT -} # Run the ostree image setup sequence. 
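+# Boot entry discovery and parsing is now done by refkit-ostree (in C);
+# the shell side only performs the mounts in ostree_prepare_root above.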
ostree_run () { - ostree_initramfs_prepare_root + #/usr/bin/refkit-ostree --prepare-root + + if ! ostree_prepare_root; then + fatal "ERROR: failed to prepare ostree deployment" + else + return 0 + fi } From fbbb4036badeeed80cd8af232267bc1eb5a973c6 Mon Sep 17 00:00:00 2001 From: Olev Kartau Date: Mon, 10 Jul 2017 10:05:38 +0300 Subject: [PATCH 09/21] CI tester-exec: retry daft run after unstable It makes sense to retry daft stage only after unstable result which means problem on tester. Signed-off-by: Olev Kartau --- docker/tester-exec.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docker/tester-exec.sh b/docker/tester-exec.sh index 7c0e8ca653..7f435ec447 100755 --- a/docker/tester-exec.sh +++ b/docker/tester-exec.sh @@ -111,6 +111,12 @@ testimg() { daft ${DEVICE} ${FILENAME} --record TEST_EXIT_CODE=$? + if [ "$TEST_EXIT_CODE" = 1 ]; then + echo "WARNING: daft=1 would lead to UNSTABLE: wipe results, retry daft" + rm -f *.log *.log.raw *.xml + daft ${DEVICE} ${FILENAME} --record + TEST_EXIT_CODE=$? + fi fi # delete symlinks, these point outside of local set and are useless From 4d4224be1a35f37f20c439a6d552bbebdcb2658e Mon Sep 17 00:00:00 2001 From: Olev Kartau Date: Tue, 11 Jul 2017 11:24:33 +0300 Subject: [PATCH 10/21] CI tester-create-summary: require bash in shebang We use bash constructs, but shebang was forgotten when this part was extracted from tester-exec.sh Signed-off-by: Olev Kartau --- docker/tester-create-summary.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/tester-create-summary.sh b/docker/tester-create-summary.sh index 13669bf1bf..c94e97ba53 100755 --- a/docker/tester-create-summary.sh +++ b/docker/tester-create-summary.sh @@ -1,3 +1,4 @@ +#!/bin/bash -ue # # tester-create-summary.sh: tester creates summary information # Copyright (c) 2016, Intel Corporation. From 1e61b65d7971cdaf7a9c659eb3dbe486d5a6a5ba Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 12 Jul 2017 06:58:27 +0200 Subject: [PATCH 11/21] refkit_security_flags.inc: avoid FORTIFY_SOURCE for iotivity OE-core is introducing -D_FORTIFY_SOURCE=2 as default. iotivity fails to build with that because one example doesn't get the necessary -O: | /fast/build/refkit/intel-corei7-64/tmp-glibc/work/corei7-64-refkit-linux/iotivity/1.2.1-r0/recipe-sysroot/usr/include/features.h:373:4: error: #warning _FORTIFY_SOURCE requires compiling with optimization (-O) [-Werror=cpp] | # warning _FORTIFY_SOURCE requires compiling with optimization (-O) | ^~~~~~~ | cc1: all warnings being treated as errors | scons: *** [out/yocto/x86_64/release/resource/oc_logger/examples/c_test_logging.o] Error 1 | scons: building terminated because of errors. | ERROR: scons build execution failed. A better solution would be to compile all of iotivity with -O. 
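
A quick way to sanity-check the workaround is to dump the recipe
environment and confirm that the define is gone for iotivity while an
unaffected recipe still carries the default flags (a rough sketch,
assuming an initialized build directory):

    # Should no longer contain -D_FORTIFY_SOURCE=2:
    bitbake -e iotivity | grep '^SECURITY_CFLAGS='
    # An unaffected recipe, e.g. zeromq, keeps the default set:
    bitbake -e zeromq | grep '^SECURITY_CFLAGS='
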
Signed-off-by: Patrick Ohly --- .../conf/distro/include/refkit_security_flags.inc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/meta-refkit-core/conf/distro/include/refkit_security_flags.inc b/meta-refkit-core/conf/distro/include/refkit_security_flags.inc index 3095c02f98..6b2c9ac3d7 100644 --- a/meta-refkit-core/conf/distro/include/refkit_security_flags.inc +++ b/meta-refkit-core/conf/distro/include/refkit_security_flags.inc @@ -9,7 +9,11 @@ SECURITY_CFLAGS_pn-efivar = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" SECURITY_CFLAGS_pn-zeromq = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" SECURITY_CFLAGS_pn-mraa = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" SECURITY_CFLAGS_pn-upm = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" -SECURITY_CFLAGS_pn-iotivity = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" +# -D_FORTIFY_SOURCE=2 causes compilation of oc_logger/examples/c_test_logging.o +# to fail because -O is not passed to that one. +# | # warning _FORTIFY_SOURCE requires compiling with optimization (-O) +# cc1: all warnings being treated as errors +SECURITY_CFLAGS_pn-iotivity = "${@ '${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}'.replace('-D_FORTIFY_SOURCE=2', '') }" SECURITY_CFLAGS_pn-krb5 = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" SECURITY_CFLAGS_pn-tbb = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" SECURITY_CFLAGS_pn-protobuf = "${SECURITY_NO_PIE_CFLAGS} ${SECURITY_PIC_CFLAGS}" From cd0cc1f7e73d424207bf58395f1fc36d997f1872 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 11 Jul 2017 11:46:13 +0200 Subject: [PATCH 12/21] refkit: update bitbake and OE-core, usrmerge The usrmerge patches are now in OE-core. The corresponding changes in refkit must be removed together with updating to that version to avoid build breakages (like "/usr/usr/lib/ld-linux-x86-64.so.2: No such file or directory" in gobject-introspection). * bitbake 8d0a76f...4a14b44 (33): > knotty: Drop task prefix of PLAIN log messages > BBHandler: Remove old style bb.data.setVar() syntax usage > server/xmlrpc: Add Heartbeat event support > event: Queue offline events for the UI > server/process: Fix waitEvent() calls with 0 timeout > data: Micro performance optimisation tweak > cooker: Use multiple BuildStarted events for multiconfig > bitbake: Add MultiConfigParsed event > bitbake-user-manual: Removed and replaced broken link > bitbake-user-manual: Replaced bad link > npm fetcher: fix unknown variable name. 
> cache: don't insert PN into PACKAGES > toaster: test 'commit' first in get_vcs_reference > toaster: large package set breaks sqlite query > toaster: Add distro selection support > toaster: git clone progress bar > toaster: address Django-1.10 API deprecations > bitbake-selftest: add bb.tests.event to bitbake-selftest > tests: create unit tests for event module > event: remove mapping for deleted event handlers > fetch: fix handling of files with incorrect checksums from a premirror > event: drop some unused events > toaster: noweb should init database > toaster: get_last_build_id not called correctly > toaster: add getMessage to MockEvent > toaster: fail on layers with sub-layer > toaster: add ID's to build menu links > toaster: add ID's to navigation links > bitbake-user-manual: Updated BBLAYERS_FETCH_DIR variable description > cooker: ensure graceful exit after exception during BuildCompleted handler > cooker: fix always loading cache on every UI start with memres > bitbake: runqueue: multiconfig fix > bitbake:process: flush stderr/stdout to log * openembedded-core de79149...7dd5dfc (80): > oeqa/tinfoil: Improve test_wait_event for race issues > staging: Ensure a clean recipe sysroot removes addto_recipe_sysroot stamps > oeqa/sdk: Replace buildiptables for buildlzip tests > testimage: Use the renamed buildlzip > oeqa/runtime: Replace buildiptables for buildlzip on runtime tests > mirrors.bbclass: remove stale lsof ftp mirrors > lsof: update SRC_URI > lsof: minor recipe cleanup > image_types: fix kernel target on elf's image dependencies > linuxloader.bbclass: add musl libc support > vulkan: RRECOMMEND mesa drivers > mesa, gstreamer: Add "vulkan" DISTRO_FEATURE > gstreamer1.0-plugins-bad: Add vulkan PACKAGECONFIG > assimp: Add as dependency of vulkan-demos > vulkan: Upgrade 1.0.39.1 -> 1.0.51.0 > perl: Support musl-x32 build > grub-efi: Support musl-x32 > gnu-efi: Support musl-x32 build > siteinfo.bbclass: Support musl-x32 > insane.bbclass: Support musl-x32 > mesa: etnaviv: fix shader miscompilation with more than 16 labels > ovmf: Fix build with toolchain defaulting to PIE > security_flags.inc: Do not build gcc for powerpc with PIE defaults > gstreamer1.0-plugins-bad: Fix missing library with bcm egl > libunwind: We set -fPIE in security flags now if gcc is not configured for default PIE > sysklogd: Improve build and fix runtime crash > gcc: Link libssp_nonshared.a only on musl targets > gcc7: Enable static PIE > distutils,setuptools: Delete use of SECURITY_NO_PIE_CFLAGS > security_flags.inc: Delete pinnings for SECURITY_NO_PIE_CFLAGS > gcc: Introduce a knob to configure gcc to default to PIE > base: Add MultiConfigParsed handler to deal with unstable build signatures > image.bbclass: create root symlinks in nativesdk target sysroot > insane.bbclass: Add package QA check for merged /usr. 
> image: create symlinks needed for merged /usr > systemd: changes to support merged /usr > cross.bbclass: merged /usr support > bitbake.conf: support for merged usr with DISTRO_FEATURE usrmerge > speex: update SRC_URI > avahi-ui: reduce local pending patches > mirrors: Add HTTP mirrors for ftp://sourceware.org > local.conf.sample: drop image-swab reference > ltp: add acl, attr, curl and util-linux runtime dependencies > ltp: Reduce local Pending patches > ltp: syscalls/add_key02: fix for nonempty NULL payload > libgfortran: Add missing fincludes > libgfortran: Add missing dependency gcc-cross > systemd: Do not use xlocale.h > mesa: Upgrade to 17.1.4 release > mesa: Avoid platform probing when building without EGL > sanity.bbclass: fix AttributeError in mirror format checks > oe-pkgdata-util: package-info: Allow extra variables to be displayed > expat: upgrade to 2.2.1 > grep: upgrade to 3.1 > classes/populate_sdk_base: Fix SDK manifest generation > valgrind: Remove -no-pie from cflags > icu: Fix build with glibc 2.26 > epiphany: Fix build errors when compiling with security flags > qemu: Replace use of struct ucontext with ucontext_t > strace: upgrade to 4.17 > valgrind: Fix build with glibc 2.26 > bluez: Correct the timer count for bcm43xx firmware download > binutils: update SRCREV to fix powerpc gold link bug > yocto-compat-layer.py: make signature check code reusable > yocto-compat-layer.py: allow README with suffix > yocto-compat-layer.py: add test_world > yocto-compat-layer.py: apply test_signatures to all layers > yocto-compat-layer.py: tolerate broken world builds during signature diff > yocto-compat-layer.py: avoid adding layers more than once > sysstat:11.5.5 -> 11.5.6 > openssl: Upgrade 1.0.2k -> 1.0.2l > libepoxy: Upgrade 1.4.2 -> 1.4.3 > gtk+3: Update the patches to work with old versions of patch > gtk+3: Upgrade 3.22.15 -> 3.22.16 > gtk+3: Update UPSTREAM_CHECK_REGEX > cmake: Use find_program if find_host_program is not available > insane: remove obsolete gcc 4.5 check > sanity.bbclass: remove ASSUME_PROVIDED checks that can't succeed > meta/lib/oe/sdk.py: support added for executing pre-target commands > mkefidsk: fix bash/dash shell quoting problem Signed-off-by: Patrick Ohly merge: update --- bitbake | 2 +- meta-refkit-core/classes/refkit-image.bbclass | 40 ------------------- .../distro/include/enable-refkit-config.inc | 2 - .../conf/distro/include/refkit-config.inc | 3 ++ .../conf/distro/include/usrmerge.inc | 29 -------------- .../recipes-images/images/refkit-initramfs.bb | 9 ----- openembedded-core | 2 +- 7 files changed, 5 insertions(+), 82 deletions(-) delete mode 100644 meta-refkit-core/conf/distro/include/usrmerge.inc diff --git a/bitbake b/bitbake index 8d0a76f5a5..4a14b44b3e 160000 --- a/bitbake +++ b/bitbake @@ -1 +1 @@ -Subproject commit 8d0a76f5a595dddf16b7268bae2c00ef5f568316 +Subproject commit 4a14b44b3e4fad3a3e5e53461aa8ba9929a515b8 diff --git a/meta-refkit-core/classes/refkit-image.bbclass b/meta-refkit-core/classes/refkit-image.bbclass index 7ae98794dc..620612a96f 100644 --- a/meta-refkit-core/classes/refkit-image.bbclass +++ b/meta-refkit-core/classes/refkit-image.bbclass @@ -531,43 +531,3 @@ EOF fi } ROOTFS_POSTPROCESS_COMMAND += "refkit_image_system_serialgetty; " - -# Prepare the symlinks required for merged /usr at the time of rootfs creation. - -# The links created in rootfs are: -#/bin --> /usr/sbin -#/sbin --> /usr/sbin -#/lib --> /usr/lib -#/lib64 --> /usr/lib64 - -# We cannot make these symlinks as part of 'base-files' or some other package. 
-# Because at rootfs creation, installation of the package(say kernel) that -# depends on these root folders/links fails, if package manager installs this -# package prior to base-files. - -# These symbolic links in top level folder should present as long as -# - kerenl tools use /lib/{module,firmware} -# - shell scripts uses -#upstream commit waiting for review: -# http://lists.openembedded.org/pipermail/openembedded-core/2017-February/133151.html -create_merged_usr_symlinks() { - install -m 0755 -d ${IMAGE_ROOTFS}/${base_bindir} - install -m 0755 -d ${IMAGE_ROOTFS}/${base_sbindir} - install -m 0755 -d ${IMAGE_ROOTFS}/${base_libdir} - lnr ${IMAGE_ROOTFS}${base_bindir} ${IMAGE_ROOTFS}/bin - lnr ${IMAGE_ROOTFS}${base_sbindir} ${IMAGE_ROOTFS}/sbin - lnr ${IMAGE_ROOTFS}${base_libdir} ${IMAGE_ROOTFS}/${baselib} - - if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then - install -m 0755 -d ${IMAGE_ROOTFS}/${nonarch_base_libdir} - lnr ${IMAGE_ROOTFS}${nonarch_base_libdir} ${IMAGE_ROOTFS}/lib - fi - - # create base links for multilibs - multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}" - for d in $multi_libdirs; do - install -m 0755 -d ${IMAGE_ROOTFS}/${exec_prefix}/$d - lnr ${IMAGE_ROOTFS}/${exec_prefix}/$d ${IMAGE_ROOTFS}/$d - done -} -ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks; ', '',d)}" diff --git a/meta-refkit-core/conf/distro/include/enable-refkit-config.inc b/meta-refkit-core/conf/distro/include/enable-refkit-config.inc index 6c2cac9b22..7468050662 100644 --- a/meta-refkit-core/conf/distro/include/enable-refkit-config.inc +++ b/meta-refkit-core/conf/distro/include/enable-refkit-config.inc @@ -16,8 +16,6 @@ VIRTUAL-RUNTIME_initscripts = "" DISTRO_FEATURES_DEFAULT_remove = "${REFKIT_DEFAULT_DISTRO_FEATURES_REMOVE}" DISTRO_FEATURES ?= "${DISTRO_FEATURES_DEFAULT} ${DISTRO_FEATURES_LIBC} ${REFKIT_DEFAULT_DISTRO_FEATURES}" -require conf/distro/include/usrmerge.inc - # Enable basic flatpak support. include conf/distro/include/flatpak.inc diff --git a/meta-refkit-core/conf/distro/include/refkit-config.inc b/meta-refkit-core/conf/distro/include/refkit-config.inc index 046c12799b..88568505e2 100644 --- a/meta-refkit-core/conf/distro/include/refkit-config.inc +++ b/meta-refkit-core/conf/distro/include/refkit-config.inc @@ -61,6 +61,9 @@ REFKIT_DEFAULT_DISTRO_FEATURES += "refkit-config" # Enable OSTree system update support. REFKIT_DEFAULT_DISTRO_FEATURES += "ostree" +# This is needed for OSTree and flatpack. 
+REFKIT_DEFAULT_DISTRO_FEATURES += "usrmerge" + # Remove currently unsupported distro features from global defaults REFKIT_DEFAULT_DISTRO_FEATURES_REMOVE += "x11 3g" diff --git a/meta-refkit-core/conf/distro/include/usrmerge.inc b/meta-refkit-core/conf/distro/include/usrmerge.inc deleted file mode 100644 index b6255ac233..0000000000 --- a/meta-refkit-core/conf/distro/include/usrmerge.inc +++ /dev/null @@ -1,29 +0,0 @@ -#enable merged /usr -REFKIT_DEFAULT_DISTRO_FEATURES += "usrmerge" - -# Change the configuration to point all base folder to /usr -export base_bindir = "${base_prefix}/usr/bin" -export base_sbindir = "${base_prefix}/usr/sbin" -export base_libdir = "${base_prefix}/usr/${baselib}" -export nonarch_base_libdir = "${base_prefix}/usr/lib" - -#nativesdk -base_bindir_class-nativesdk = "${SDKPATHNATIVE}${base_bindir_nativesdk}" -base_sbindir_class-nativesdk = "${SDKPATHNATIVE}${base_sbindir_nativesdk}" -base_libdir_class-nativesdk = "${SDKPATHNATIVE}${base_libdir_nativesdk}" - -target_base_libdir_class-cross = "${target_base_prefix}/usr/lib" - -# Disable split-usr support in systemd and point the rootprefix to /usr -EXTRA_OECONF_append_pn-systemd = " --disable-split-usr" -rootprefix_pn-systemd = "${exec_prefix}" - -# Most of shell scripts refer to '#!/bin/{sh,bash}' inside the script. But, when -# root folders(/bin, /lib, /sbin) merged with their /usr counterparts this path -# would be /usr/bin/{sh, bash}. The builder complains that 'no package provides -# '/bin/{sh/bash}''. So to satisfy builder adding '/bin/{sh,bash}' to bash, -# busybox package providers list. This is a temporary hack till we get a -# solution from oe-core -# Links to Upstream patches: -# http://lists.openembedded.org/pipermail/openembedded-core/2017-February/133148.html -# http://lists.openembedded.org/pipermail/openembedded-core/2017-February/133149.html diff --git a/meta-refkit-core/recipes-images/images/refkit-initramfs.bb b/meta-refkit-core/recipes-images/images/refkit-initramfs.bb index a83b601590..3008eea54f 100644 --- a/meta-refkit-core/recipes-images/images/refkit-initramfs.bb +++ b/meta-refkit-core/recipes-images/images/refkit-initramfs.bb @@ -78,12 +78,3 @@ IMA_EVM_ROOTFS_SIGNED = "-maxdepth 0 -false" IMA_EVM_ROOTFS_HASHED = "-maxdepth 0 -false" IMA_EVM_ROOTFS_CLASS = "${@bb.utils.contains('IMAGE_FEATURES', 'ima', 'ima-evm-rootfs', '',d)}" inherit ${IMA_EVM_ROOTFS_CLASS} - -create_merged_usr_links() { - mkdir -p ${IMAGE_ROOTFS}${libdir} ${IMAGE_ROOTFS}${bindir} ${IMAGE_ROOTFS}${sbindir} - lnr ${IMAGE_ROOTFS}${libdir} ${IMAGE_ROOTFS}/${baselib} - lnr ${IMAGE_ROOTFS}${bindir} ${IMAGE_ROOTFS}/bin - lnr ${IMAGE_ROOTFS}${sbindir} ${IMAGE_ROOTFS}/sbin -} -ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_links;', '', d)}" - diff --git a/openembedded-core b/openembedded-core index de79149545..7dd5dfc4d5 160000 --- a/openembedded-core +++ b/openembedded-core @@ -1 +1 @@ -Subproject commit de7914954571ea8e717f56b6d6df13157b0973bc +Subproject commit 7dd5dfc4d56f1201110d947ce1ca3c6d64fbc7da From a1d6152407b4021a28d75524a723ed17f93b2cd1 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 11 Jul 2017 16:35:48 +0200 Subject: [PATCH 13/21] refkit: stop using forked yocto-compat-layer.py All of our changes have been merged into OE-core, so we can remove the local fork. 
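
With the fork gone, the upstream script is simply picked up from PATH.
For reference, a typical standalone invocation looks roughly like this
(the layer paths below are placeholders):

    # The script itself requires a sourced build environment.
    source oe-init-build-env
    yocto-compat-layer.py --dependency ../meta-refkit-core \
        --machines intel-corei7-64 \
        -- ../meta-refkit
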
Signed-off-by: Patrick Ohly --- .../lib/oeqa/selftest/cases/refkit_poky.py | 10 +- .../scripts/lib/compatlayer/__init__.py | 392 ------------------ meta-refkit/scripts/lib/compatlayer/case.py | 7 - .../scripts/lib/compatlayer/cases/__init__.py | 0 .../scripts/lib/compatlayer/cases/bsp.py | 204 --------- .../scripts/lib/compatlayer/cases/common.py | 48 --- .../scripts/lib/compatlayer/cases/distro.py | 26 -- .../scripts/lib/compatlayer/context.py | 14 - meta-refkit/scripts/lib/scriptpath.py | 42 -- meta-refkit/scripts/lib/scriptutils.py | 135 ------ meta-refkit/scripts/yocto-compat-layer.py | 199 --------- 11 files changed, 1 insertion(+), 1076 deletions(-) delete mode 100644 meta-refkit/scripts/lib/compatlayer/__init__.py delete mode 100644 meta-refkit/scripts/lib/compatlayer/case.py delete mode 100644 meta-refkit/scripts/lib/compatlayer/cases/__init__.py delete mode 100644 meta-refkit/scripts/lib/compatlayer/cases/bsp.py delete mode 100644 meta-refkit/scripts/lib/compatlayer/cases/common.py delete mode 100644 meta-refkit/scripts/lib/compatlayer/cases/distro.py delete mode 100644 meta-refkit/scripts/lib/compatlayer/context.py delete mode 100644 meta-refkit/scripts/lib/scriptpath.py delete mode 100644 meta-refkit/scripts/lib/scriptutils.py delete mode 100755 meta-refkit/scripts/yocto-compat-layer.py diff --git a/meta-refkit/lib/oeqa/selftest/cases/refkit_poky.py b/meta-refkit/lib/oeqa/selftest/cases/refkit_poky.py index 8fcda9fd26..cf72a31bfa 100644 --- a/meta-refkit/lib/oeqa/selftest/cases/refkit_poky.py +++ b/meta-refkit/lib/oeqa/selftest/cases/refkit_poky.py @@ -62,9 +62,7 @@ def test(self): - proper declaration of dependencies (because 'yocto-compat-layer.py --dependency' adds those) - parse and dependencies ('bitbake -S none world' must work) """ - # We must use our forked yocto-compat-layer.py. - cmd = "%s/scripts/yocto-compat-layer.py --dependency %s -- %s" % ( - self.layers['meta-refkit'], + cmd = "yocto-compat-layer.py --dependency %s -- %s" % ( ' '.join(self.layers.values()), self.layers[refkit_layer]) # "world" does not include images. We need to enable them explicitly, otherwise @@ -144,8 +142,6 @@ def setUpClass(cls): # We expect compatlayer in the lib dir of the directory holding yocto-compat-layer.py. yocto_compat_layer = shutil.which('yocto-compat-layer.py') scripts_path = os.path.dirname(os.path.realpath(yocto_compat_layer)) - # Temporary override: use the copy from meta-refkit. - scripts_path = os.path.join(cls.layers['meta-refkit'], 'scripts') cls.yocto_compat_lib_path = scripts_path + '/lib' def setUpLocal(self): @@ -180,10 +176,6 @@ def setUpLocal(self): os.symlink(os.path.join(self.layers['meta-refkit-core'], 'lib', target), os.path.join(lib, target)) - env = os.environ.copy() - # We must use our forked yocto-compat-layer.py. - env['PATH'] = '%s/scripts:%s' % (self.layers['meta-refkit'], env['PATH']) - # Enter the build directory. 
self.old_env = os.environ.copy() self.old_cwd = os.getcwd() diff --git a/meta-refkit/scripts/lib/compatlayer/__init__.py b/meta-refkit/scripts/lib/compatlayer/__init__.py deleted file mode 100644 index edd866ce1e..0000000000 --- a/meta-refkit/scripts/lib/compatlayer/__init__.py +++ /dev/null @@ -1,392 +0,0 @@ -# Yocto Project compatibility layer tool -# -# Copyright (C) 2017 Intel Corporation -# Released under the MIT license (see COPYING.MIT) - -import os -import re -import subprocess -from enum import Enum - -import bb.tinfoil - -class LayerType(Enum): - BSP = 0 - DISTRO = 1 - SOFTWARE = 2 - ERROR_NO_LAYER_CONF = 98 - ERROR_BSP_DISTRO = 99 - -def _get_configurations(path): - configs = [] - - for f in os.listdir(path): - file_path = os.path.join(path, f) - if os.path.isfile(file_path) and f.endswith('.conf'): - configs.append(f[:-5]) # strip .conf - return configs - -def _get_layer_collections(layer_path, lconf=None, data=None): - import bb.parse - import bb.data - - if lconf is None: - lconf = os.path.join(layer_path, 'conf', 'layer.conf') - - if data is None: - ldata = bb.data.init() - bb.parse.init_parser(ldata) - else: - ldata = data.createCopy() - - ldata.setVar('LAYERDIR', layer_path) - try: - ldata = bb.parse.handle(lconf, ldata, include=True) - except BaseException as exc: - raise LayerError(exc) - ldata.expandVarref('LAYERDIR') - - collections = (ldata.getVar('BBFILE_COLLECTIONS', True) or '').split() - if not collections: - name = os.path.basename(layer_path) - collections = [name] - - collections = {c: {} for c in collections} - for name in collections: - priority = ldata.getVar('BBFILE_PRIORITY_%s' % name, True) - pattern = ldata.getVar('BBFILE_PATTERN_%s' % name, True) - depends = ldata.getVar('LAYERDEPENDS_%s' % name, True) - collections[name]['priority'] = priority - collections[name]['pattern'] = pattern - collections[name]['depends'] = depends - - return collections - -def _detect_layer(layer_path): - """ - Scans layer directory to detect what type of layer - is BSP, Distro or Software. - - Returns a dictionary with layer name, type and path. 
- """ - - layer = {} - layer_name = os.path.basename(layer_path) - - layer['name'] = layer_name - layer['path'] = layer_path - layer['conf'] = {} - - if not os.path.isfile(os.path.join(layer_path, 'conf', 'layer.conf')): - layer['type'] = LayerType.ERROR_NO_LAYER_CONF - return layer - - machine_conf = os.path.join(layer_path, 'conf', 'machine') - distro_conf = os.path.join(layer_path, 'conf', 'distro') - - is_bsp = False - is_distro = False - - if os.path.isdir(machine_conf): - machines = _get_configurations(machine_conf) - if machines: - is_bsp = True - - if os.path.isdir(distro_conf): - distros = _get_configurations(distro_conf) - if distros: - is_distro = True - - if is_bsp and is_distro: - layer['type'] = LayerType.ERROR_BSP_DISTRO - elif is_bsp: - layer['type'] = LayerType.BSP - layer['conf']['machines'] = machines - elif is_distro: - layer['type'] = LayerType.DISTRO - layer['conf']['distros'] = distros - else: - layer['type'] = LayerType.SOFTWARE - - layer['collections'] = _get_layer_collections(layer['path']) - - return layer - -def detect_layers(layer_directories, no_auto): - layers = [] - - for directory in layer_directories: - directory = os.path.realpath(directory) - if directory[-1] == '/': - directory = directory[0:-1] - - if no_auto: - conf_dir = os.path.join(directory, 'conf') - if os.path.isdir(conf_dir): - layer = _detect_layer(directory) - if layer: - layers.append(layer) - else: - for root, dirs, files in os.walk(directory): - dir_name = os.path.basename(root) - conf_dir = os.path.join(root, 'conf') - if os.path.isdir(conf_dir): - layer = _detect_layer(root) - if layer: - layers.append(layer) - - return layers - -def _find_layer_depends(depend, layers): - for layer in layers: - for collection in layer['collections']: - if depend == collection: - return layer - return None - -def add_layer_dependencies(bblayersconf, layer, layers, logger): - def recurse_dependencies(depends, layer, layers, logger, ret = []): - logger.debug('Processing dependencies %s for layer %s.' % \ - (depends, layer['name'])) - - for depend in depends.split(): - # core (oe-core) is suppose to be provided - if depend == 'core': - continue - - layer_depend = _find_layer_depends(depend, layers) - if not layer_depend: - logger.error('Layer %s depends on %s and isn\'t found.' % \ - (layer['name'], depend)) - ret = None - continue - - # We keep processing, even if ret is None, this allows us to report - # multiple errors at once - if ret is not None and layer_depend not in ret: - ret.append(layer_depend) - - # Recursively process... - if 'collections' not in layer_depend: - continue - - for collection in layer_depend['collections']: - collect_deps = layer_depend['collections'][collection]['depends'] - if not collect_deps: - continue - ret = recurse_dependencies(collect_deps, layer_depend, layers, logger, ret) - - return ret - - layer_depends = [] - for collection in layer['collections']: - depends = layer['collections'][collection]['depends'] - if not depends: - continue - - layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends) - - # Note: [] (empty) is allowed, None is not! - if layer_depends is None: - return False - else: - # Don't add a layer more that is already present. - added = set() - output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8') - for layer, path, pri in re.findall(r'^(\S+) +([^\n]*?) 
+(\d+)$', output, re.MULTILINE): - added.add(path) - - for layer_depend in layer_depends: - name = layer_depend['name'] - path = layer_depend['path'] - if path in added: - continue - else: - added.add(path) - logger.info('Adding layer dependency %s' % name) - with open(bblayersconf, 'a+') as f: - f.write("\nBBLAYERS += \"%s\"\n" % path) - return True - -def add_layer(bblayersconf, layer, layers, logger): - logger.info('Adding layer %s' % layer['name']) - with open(bblayersconf, 'a+') as f: - f.write("\nBBLAYERS += \"%s\"\n" % layer['path']) - - return True - -def check_command(error_msg, cmd): - ''' - Run a command under a shell, capture stdout and stderr in a single stream, - throw an error when command returns non-zero exit code. Returns the output. - ''' - - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - output, _ = p.communicate() - if p.returncode: - msg = "%s\nCommand: %s\nOutput:\n%s" % (error_msg, cmd, output.decode('utf-8')) - raise RuntimeError(msg) - return output - -def get_signatures(builddir, failsafe=False, machine=None): - import re - - # some recipes needs to be excluded like meta-world-pkgdata - # because a layer can add recipes to a world build so signature - # will be change - exclude_recipes = ('meta-world-pkgdata',) - - sigs = {} - tune2tasks = {} - - cmd = '' - if machine: - cmd += 'MACHINE=%s ' % machine - cmd += 'bitbake ' - if failsafe: - cmd += '-k ' - cmd += '-S none world' - sigs_file = os.path.join(builddir, 'locked-sigs.inc') - if os.path.exists(sigs_file): - os.unlink(sigs_file) - try: - check_command('Generating signatures failed. This might be due to some parse error and/or general layer incompatibilities.', - cmd) - except RuntimeError as ex: - if failsafe and os.path.exists(sigs_file): - # Ignore the error here. Most likely some recipes active - # in a world build lack some dependencies. There is a - # separate test_machine_world_build which exposes the - # failure. - pass - else: - raise - - sig_regex = re.compile("^(?P.*:.*):(?P.*) .$") - tune_regex = re.compile("(^|\s)SIGGEN_LOCKEDSIGS_t-(?P\S*)\s*=\s*") - current_tune = None - with open(sigs_file, 'r') as f: - for line in f.readlines(): - line = line.strip() - t = tune_regex.search(line) - if t: - current_tune = t.group('tune') - s = sig_regex.match(line) - if s: - exclude = False - for er in exclude_recipes: - (recipe, task) = s.group('task').split(':') - if er == recipe: - exclude = True - break - if exclude: - continue - - sigs[s.group('task')] = s.group('hash') - tune2tasks.setdefault(current_tune, []).append(s.group('task')) - - if not sigs: - raise RuntimeError('Can\'t load signatures from %s' % sigs_file) - - return (sigs, tune2tasks) - -def get_depgraph(targets=['world'], failsafe=False): - ''' - Returns the dependency graph for the given target(s). - The dependency graph is taken directly from DepTreeEvent. 
- ''' - depgraph = None - with bb.tinfoil.Tinfoil() as tinfoil: - tinfoil.prepare(config_only=False) - tinfoil.set_event_mask(['bb.event.NoProvider', 'bb.event.DepTreeGenerated', 'bb.command.CommandCompleted']) - if not tinfoil.run_command('generateDepTreeEvent', targets, 'do_build'): - raise RuntimeError('starting generateDepTreeEvent failed') - while True: - event = tinfoil.wait_event(timeout=1000) - if event: - if isinstance(event, bb.command.CommandFailed): - raise RuntimeError('Generating dependency information failed: %s' % event.error) - elif isinstance(event, bb.command.CommandCompleted): - break - elif isinstance(event, bb.event.NoProvider): - if failsafe: - # The event is informational, we will get information about the - # remaining dependencies eventually and thus can ignore this - # here like we do in get_signatures(), if desired. - continue - if event._reasons: - raise RuntimeError('Nothing provides %s: %s' % (event._item, event._reasons)) - else: - raise RuntimeError('Nothing provides %s.' % (event._item)) - elif isinstance(event, bb.event.DepTreeGenerated): - depgraph = event._depgraph - - if depgraph is None: - raise RuntimeError('Could not retrieve the depgraph.') - return depgraph - -def compare_signatures(old_sigs, curr_sigs): - ''' - Compares the result of two get_signatures() calls. Returns None if no - problems found, otherwise a string that can be used as additional - explanation in self.fail(). - ''' - # task -> (old signature, new signature) - sig_diff = {} - for task in old_sigs: - if task in curr_sigs and \ - old_sigs[task] != curr_sigs[task]: - sig_diff[task] = (old_sigs[task], curr_sigs[task]) - - if not sig_diff: - return None - - # Beware, depgraph uses task=. whereas get_signatures() - # uses :. Need to convert sometimes. The output follows - # the convention from get_signatures() because that seems closer to - # normal bitbake output. - def sig2graph(task): - pn, taskname = task.rsplit(':', 1) - return pn + '.' + taskname - def graph2sig(task): - pn, taskname = task.rsplit('.', 1) - return pn + ':' + taskname - depgraph = get_depgraph(failsafe=True) - depends = depgraph['tdepends'] - - # If a task A has a changed signature, but none of its - # dependencies, then we need to report it because it is - # the one which introduces a change. Any task depending on - # A (directly or indirectly) will also have a changed - # signature, but we don't need to report it. It might have - # its own changes, which will become apparent once the - # issues that we do report are fixed and the test gets run - # again. 
- sig_diff_filtered = [] - for task, (old_sig, new_sig) in sig_diff.items(): - deps_tainted = False - for dep in depends.get(sig2graph(task), ()): - if graph2sig(dep) in sig_diff: - deps_tainted = True - break - if not deps_tainted: - sig_diff_filtered.append((task, old_sig, new_sig)) - - msg = [] - msg.append('%d signatures changed, initial differences (first hash before, second after):' % - len(sig_diff)) - for diff in sorted(sig_diff_filtered): - recipe, taskname = diff[0].rsplit(':', 1) - cmd = 'bitbake-diffsigs --task %s %s --signature %s %s' % \ - (recipe, taskname, diff[1], diff[2]) - msg.append(' %s: %s -> %s' % diff) - msg.append(' %s' % cmd) - try: - output = check_command('Determining signature difference failed.', - cmd).decode('utf-8') - except RuntimeError as error: - output = str(error) - if output: - msg.extend([' ' + line for line in output.splitlines()]) - msg.append('') - return '\n'.join(msg) diff --git a/meta-refkit/scripts/lib/compatlayer/case.py b/meta-refkit/scripts/lib/compatlayer/case.py deleted file mode 100644 index 54ce78aa60..0000000000 --- a/meta-refkit/scripts/lib/compatlayer/case.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (C) 2017 Intel Corporation -# Released under the MIT license (see COPYING.MIT) - -from oeqa.core.case import OETestCase - -class OECompatLayerTestCase(OETestCase): - pass diff --git a/meta-refkit/scripts/lib/compatlayer/cases/__init__.py b/meta-refkit/scripts/lib/compatlayer/cases/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/meta-refkit/scripts/lib/compatlayer/cases/bsp.py b/meta-refkit/scripts/lib/compatlayer/cases/bsp.py deleted file mode 100644 index 43efae406f..0000000000 --- a/meta-refkit/scripts/lib/compatlayer/cases/bsp.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (C) 2017 Intel Corporation -# Released under the MIT license (see COPYING.MIT) - -import unittest - -from compatlayer import LayerType, get_signatures, check_command, get_depgraph -from compatlayer.case import OECompatLayerTestCase - -class BSPCompatLayer(OECompatLayerTestCase): - @classmethod - def setUpClass(self): - if self.tc.layer['type'] != LayerType.BSP: - raise unittest.SkipTest("BSPCompatLayer: Layer %s isn't BSP one." %\ - self.tc.layer['name']) - - def test_bsp_defines_machines(self): - self.assertTrue(self.tc.layer['conf']['machines'], - "Layer is BSP but doesn't defines machines.") - - def test_bsp_no_set_machine(self): - from oeqa.utils.commands import get_bb_var - - machine = get_bb_var('MACHINE') - self.assertEqual(self.td['bbvars']['MACHINE'], machine, - msg="Layer %s modified machine %s -> %s" % \ - (self.tc.layer['name'], self.td['bbvars']['MACHINE'], machine)) - - - def test_machine_world(self): - ''' - "bitbake world" is expected to work regardless which machine is selected. - BSP layers sometimes break that by enabling a recipe for a certain machine - without checking whether that recipe actually can be built in the current - distro configuration (for example, OpenGL might not enabled). - - This test iterates over all machines. It would be nicer to instantiate - it once per machine. It merely checks for errors during parse - time. It does not actually attempt to build anything. - ''' - - if not self.td['machines']: - self.skipTest('No machines set with --machines.') - msg = [] - for machine in self.td['machines']: - # In contrast to test_machine_signatures() below, errors are fatal here. 
- try: - get_signatures(self.td['builddir'], failsafe=False, machine=machine) - except RuntimeError as ex: - msg.append(str(ex)) - if msg: - msg.insert(0, 'The following machines broke a world build:') - self.fail('\n'.join(msg)) - - def test_machine_signatures(self): - ''' - Selecting a machine may only affect the signature of tasks that are specific - to that machine. In other words, when MACHINE=A and MACHINE=B share a recipe - foo and the output of foo, then both machine configurations must build foo - in exactly the same way. Otherwise it is not possible to use both machines - in the same distribution. - - This criteria can only be tested by testing different machines in combination, - i.e. one main layer, potentially several additional BSP layers and an explicit - choice of machines: - yocto-compat-layer --additional-layers .../meta-intel --machines intel-corei7-64 imx6slevk -- .../meta-freescale - ''' - - if not self.td['machines']: - self.skipTest('No machines set with --machines.') - - # Collect signatures for all machines that we are testing - # and merge that into a hash: - # tune -> task -> signature -> list of machines with that combination - # - # It is an error if any tune/task pair has more than one signature, - # because that implies that the machines that caused those different - # signatures do not agree on how to execute the task. - tunes = {} - # Preserve ordering of machines as chosen by the user. - for machine in self.td['machines']: - curr_sigs, tune2tasks = get_signatures(self.td['builddir'], failsafe=True, machine=machine) - # Invert the tune -> [tasks] mapping. - tasks2tune = {} - for tune, tasks in tune2tasks.items(): - for task in tasks: - tasks2tune[task] = tune - for task, sighash in curr_sigs.items(): - tunes.setdefault(tasks2tune[task], {}).setdefault(task, {}).setdefault(sighash, []).append(machine) - - msg = [] - pruned = 0 - last_line_key = None - # do_fetch, do_unpack, ..., do_build - taskname_list = [] - if tunes: - # The output below is most useful when we start with tasks that are at - # the bottom of the dependency chain, i.e. those that run first. If - # those tasks differ, the rest also does. - # - # To get an ordering of tasks, we do a topological sort of the entire - # depgraph for the base configuration, then on-the-fly flatten that list by stripping - # out the recipe names and removing duplicates. The base configuration - # is not necessarily representative, but should be close enough. Tasks - # that were not encountered get a default priority. - depgraph = get_depgraph() - depends = depgraph['tdepends'] - WHITE = 1 - GRAY = 2 - BLACK = 3 - color = {} - found = set() - def visit(task): - color[task] = GRAY - for dep in depends.get(task, ()): - if color.setdefault(dep, WHITE) == WHITE: - visit(dep) - color[task] = BLACK - pn, taskname = task.rsplit('.', 1) - if taskname not in found: - taskname_list.append(taskname) - found.add(taskname) - for task in depends.keys(): - if color.setdefault(task, WHITE) == WHITE: - visit(task) - - taskname_order = dict([(task, index) for index, task in enumerate(taskname_list) ]) - def task_key(task): - pn, taskname = task.rsplit(':', 1) - return (pn, taskname_order.get(taskname, len(taskname_list)), taskname) - - for tune in sorted(tunes.keys()): - tasks = tunes[tune] - # As for test_signatures it would be nicer to sort tasks - # by dependencies here, but that is harder because we have - # to report on tasks from different machines, which might - # have different dependencies. 
We resort to pruning the - # output by reporting only one task per recipe if the set - # of machines matches. - # - # "bitbake-diffsigs -t -s" is intelligent enough to print - # diffs recursively, so often it does not matter that much - # if we don't pick the underlying difference - # here. However, sometimes recursion fails - # (https://bugzilla.yoctoproject.org/show_bug.cgi?id=6428). - # - # To mitigate that a bit, we use a hard-coded ordering of - # tasks that represents how they normally run and prefer - # to print the ones that run first. - for task in sorted(tasks.keys(), key=task_key): - signatures = tasks[task] - # do_build can be ignored: it is know to have - # different signatures in some cases, for example in - # the allarch ca-certificates due to RDEPENDS=openssl. - # That particular dependency is whitelisted via - # SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up - # in the sstate signature hash because filtering it - # out would be hard and running do_build multiple - # times doesn't really matter. - if len(signatures.keys()) > 1 and \ - not task.endswith(':do_build'): - # Error! - # - # Sort signatures by machines, because the hex values don't mean anything. - # => all-arch adwaita-icon-theme:do_build: 1234... (beaglebone, qemux86) != abcdf... (qemux86-64) - # - # Skip the line if it is covered already by the predecessor (same pn, same sets of machines). - pn, taskname = task.rsplit(':', 1) - next_line_key = (pn, sorted(signatures.values())) - if next_line_key != last_line_key: - line = ' %s %s: ' % (tune, task) - line += ' != '.join(['%s (%s)' % (signature, ', '.join([m for m in signatures[signature]])) for - signature in sorted(signatures.keys(), key=lambda s: signatures[s])]) - last_line_key = next_line_key - msg.append(line) - # Randomly pick two mismatched signatures and remember how to invoke - # bitbake-diffsigs for them. - iterator = iter(signatures.items()) - a = next(iterator) - b = next(iterator) - diffsig_machines = '(%s) != (%s)' % (', '.join(a[1]), ', '.join(b[1])) - diffsig_params = '-t %s %s -s %s %s' % (pn, taskname, a[0], b[0]) - else: - pruned += 1 - - if msg: - msg.insert(0, 'The machines have conflicting signatures for some shared tasks:') - if pruned > 0: - msg.append('') - msg.append('%d tasks where not listed because some other task of the recipe already differed.' % pruned) - msg.append('It is likely that differences from different recipes also have the same root cause.') - msg.append('') - # Explain how to investigate... - msg.append('To investigate, run bitbake-diffsigs -t recipename taskname -s fromsig tosig.') - cmd = 'bitbake-diffsigs %s' % diffsig_params - msg.append('Example: %s in the last line' % diffsig_machines) - msg.append('Command: %s' % cmd) - # ... and actually do it automatically for that example, but without aborting - # when that fails. 
- try: - output = check_command('Comparing signatures failed.', cmd).decode('utf-8') - except RuntimeError as ex: - output = str(ex) - msg.extend([' ' + line for line in output.splitlines()]) - self.fail('\n'.join(msg)) diff --git a/meta-refkit/scripts/lib/compatlayer/cases/common.py b/meta-refkit/scripts/lib/compatlayer/cases/common.py deleted file mode 100644 index b8d7c3a78d..0000000000 --- a/meta-refkit/scripts/lib/compatlayer/cases/common.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2017 Intel Corporation -# Released under the MIT license (see COPYING.MIT) - -import glob -import os -import unittest -from compatlayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures -from compatlayer.case import OECompatLayerTestCase - -class CommonCompatLayer(OECompatLayerTestCase): - def test_readme(self): - # The top-level README file may have a suffix (like README.rst or README.txt). - readme_files = glob.glob(os.path.join(self.tc.layer['path'], 'README*')) - self.assertTrue(len(readme_files) > 0, - msg="Layer doesn't contains README file.") - - # There might be more than one file matching the file pattern above - # (for example, README.rst and README-COPYING.rst). The one with the shortest - # name is considered the "main" one. - readme_file = sorted(readme_files)[0] - data = '' - with open(readme_file, 'r') as f: - data = f.read() - self.assertTrue(data, - msg="Layer contains a README file but it is empty.") - - def test_parse(self): - check_command('Layer %s failed to parse.' % self.tc.layer['name'], - 'bitbake -p') - - def test_show_environment(self): - check_command('Layer %s failed to show environment.' % self.tc.layer['name'], - 'bitbake -e') - - def test_world(self): - ''' - "bitbake world" is expected to work. test_signatures does not cover that - because it is more lenient and ignores recipes in a world build that - are not actually buildable, so here we fail when "bitbake -S none world" - fails. - ''' - get_signatures(self.td['builddir'], failsafe=False) - - def test_signatures(self): - curr_sigs, _ = get_signatures(self.td['builddir'], failsafe=True) - msg = compare_signatures(self.td['sigs'], curr_sigs) - if msg is not None: - self.fail('Adding layer %s changed signatures.\n%s' % (self.tc.layer['name'], msg)) diff --git a/meta-refkit/scripts/lib/compatlayer/cases/distro.py b/meta-refkit/scripts/lib/compatlayer/cases/distro.py deleted file mode 100644 index 523acc1e78..0000000000 --- a/meta-refkit/scripts/lib/compatlayer/cases/distro.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2017 Intel Corporation -# Released under the MIT license (see COPYING.MIT) - -import unittest - -from compatlayer import LayerType -from compatlayer.case import OECompatLayerTestCase - -class DistroCompatLayer(OECompatLayerTestCase): - @classmethod - def setUpClass(self): - if self.tc.layer['type'] != LayerType.DISTRO: - raise unittest.SkipTest("DistroCompatLayer: Layer %s isn't Distro one." 
%\ - self.tc.layer['name']) - - def test_distro_defines_distros(self): - self.assertTrue(self.tc.layer['conf']['distros'], - "Layer is BSP but doesn't defines machines.") - - def test_distro_no_set_distros(self): - from oeqa.utils.commands import get_bb_var - - distro = get_bb_var('DISTRO') - self.assertEqual(self.td['bbvars']['DISTRO'], distro, - msg="Layer %s modified distro %s -> %s" % \ - (self.tc.layer['name'], self.td['bbvars']['DISTRO'], distro)) diff --git a/meta-refkit/scripts/lib/compatlayer/context.py b/meta-refkit/scripts/lib/compatlayer/context.py deleted file mode 100644 index 4932238798..0000000000 --- a/meta-refkit/scripts/lib/compatlayer/context.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2017 Intel Corporation -# Released under the MIT license (see COPYING.MIT) - -import os -import sys -import glob -import re - -from oeqa.core.context import OETestContext - -class CompatLayerTestContext(OETestContext): - def __init__(self, td=None, logger=None, layer=None): - super(CompatLayerTestContext, self).__init__(td, logger) - self.layer = layer diff --git a/meta-refkit/scripts/lib/scriptpath.py b/meta-refkit/scripts/lib/scriptpath.py deleted file mode 100644 index 4e9fc3d39b..0000000000 --- a/meta-refkit/scripts/lib/scriptpath.py +++ /dev/null @@ -1,42 +0,0 @@ -# Path utility functions for OE python scripts -# -# Copyright (C) 2012-2014 Intel Corporation -# Copyright (C) 2011 Mentor Graphics Corporation -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -import sys -import os -import os.path - -def add_oe_lib_path(): - basepath = os.path.abspath(os.path.dirname(__file__) + '/../../../openembedded-core') - newpath = basepath + '/meta/lib' - sys.path.insert(0, newpath) - -def add_bitbake_lib_path(): - basepath = os.path.abspath(os.path.dirname(__file__) + '/../..') - bitbakepath = None - if os.path.exists(basepath + '/bitbake/lib/bb'): - bitbakepath = basepath + '/bitbake' - else: - # look for bitbake/bin dir in PATH - for pth in os.environ['PATH'].split(':'): - if os.path.exists(os.path.join(pth, '../lib/bb')): - bitbakepath = os.path.abspath(os.path.join(pth, '..')) - break - - if bitbakepath: - sys.path.insert(0, bitbakepath + '/lib') - return bitbakepath diff --git a/meta-refkit/scripts/lib/scriptutils.py b/meta-refkit/scripts/lib/scriptutils.py deleted file mode 100644 index 4ccbe5c108..0000000000 --- a/meta-refkit/scripts/lib/scriptutils.py +++ /dev/null @@ -1,135 +0,0 @@ -# Script utility functions -# -# Copyright (C) 2014 Intel Corporation -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -import sys -import os -import logging -import glob -import argparse -import subprocess - -def logger_create(name, stream=None): - logger = logging.getLogger(name) - loggerhandler = logging.StreamHandler(stream=stream) - loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s")) - logger.addHandler(loggerhandler) - logger.setLevel(logging.INFO) - return logger - -def logger_setup_color(logger, color='auto'): - from bb.msg import BBLogFormatter - console = logging.StreamHandler(sys.stdout) - formatter = BBLogFormatter("%(levelname)s: %(message)s") - console.setFormatter(formatter) - logger.handlers = [console] - if color == 'always' or (color=='auto' and console.stream.isatty()): - formatter.enable_color() - - -def load_plugins(logger, plugins, pluginpath): - import imp - - def load_plugin(name): - logger.debug('Loading plugin %s' % name) - fp, pathname, description = imp.find_module(name, [pluginpath]) - try: - return imp.load_module(name, fp, pathname, description) - finally: - if fp: - fp.close() - - def plugin_name(filename): - return os.path.splitext(os.path.basename(filename))[0] - - known_plugins = [plugin_name(p.__name__) for p in plugins] - logger.debug('Loading plugins from %s...' % pluginpath) - for fn in glob.glob(os.path.join(pluginpath, '*.py')): - name = plugin_name(fn) - if name != '__init__' and name not in known_plugins: - plugin = load_plugin(name) - if hasattr(plugin, 'plugin_init'): - plugin.plugin_init(plugins) - plugins.append(plugin) - -def git_convert_standalone_clone(repodir): - """If specified directory is a git repository, ensure it's a standalone clone""" - import bb.process - if os.path.exists(os.path.join(repodir, '.git')): - alternatesfile = os.path.join(repodir, '.git', 'objects', 'info', 'alternates') - if os.path.exists(alternatesfile): - # This will have been cloned with -s, so we need to convert it so none - # of the contents is shared - bb.process.run('git repack -a', cwd=repodir) - os.remove(alternatesfile) - -def fetch_uri(d, uri, destdir, srcrev=None): - """Fetch a URI to a local directory""" - import bb.data - bb.utils.mkdirhier(destdir) - localdata = bb.data.createCopy(d) - localdata.setVar('BB_STRICT_CHECKSUM', '') - localdata.setVar('SRCREV', srcrev) - ret = (None, None) - olddir = os.getcwd() - try: - fetcher = bb.fetch2.Fetch([uri], localdata) - for u in fetcher.ud: - ud = fetcher.ud[u] - ud.ignore_checksums = True - fetcher.download() - for u in fetcher.ud: - ud = fetcher.ud[u] - if ud.localpath.rstrip(os.sep) == localdata.getVar('DL_DIR').rstrip(os.sep): - raise Exception('Local path is download directory - please check that the URI "%s" is correct' % uri) - fetcher.unpack(destdir) - for u in fetcher.ud: - ud = fetcher.ud[u] - if ud.method.recommends_checksum(ud): - md5value = bb.utils.md5_file(ud.localpath) - sha256value = bb.utils.sha256_file(ud.localpath) - ret = (md5value, sha256value) - finally: - os.chdir(olddir) - return ret - -def run_editor(fn): - if isinstance(fn, str): - params = '"%s"' % fn - else: - params = '' - for fnitem in fn: - params += ' "%s"' % fnitem - - editor = os.getenv('VISUAL', os.getenv('EDITOR', 'vi')) - try: - return subprocess.check_call('%s %s' % (editor, params), shell=True) - except OSError as exc: - logger.error("Execution of editor '%s' failed: %s", editor, exc) - return 1 - 
-def is_src_url(param): - """ - Check if a parameter is a URL and return True if so - NOTE: be careful about changing this as it will influence how devtool/recipetool command line handling works - """ - if not param: - return False - elif '://' in param: - return True - elif param.startswith('git@') or ('@' in param and param.endswith('.git')): - return True - return False diff --git a/meta-refkit/scripts/yocto-compat-layer.py b/meta-refkit/scripts/yocto-compat-layer.py deleted file mode 100755 index 0d5700b538..0000000000 --- a/meta-refkit/scripts/yocto-compat-layer.py +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/env python3 - -# Yocto Project compatibility layer tool -# -# Copyright (C) 2017 Intel Corporation -# Released under the MIT license (see COPYING.MIT) - -import os -import sys -import argparse -import logging -import time -import signal -import shutil -import collections - -scripts_path = os.path.dirname(os.path.realpath(__file__)) -lib_path = scripts_path + '/lib' -sys.path = sys.path + [lib_path] -import scriptutils -import scriptpath -scriptpath.add_oe_lib_path() -scriptpath.add_bitbake_lib_path() - -from compatlayer import LayerType, detect_layers, add_layer, add_layer_dependencies, get_signatures -from oeqa.utils.commands import get_bb_vars - -PROGNAME = 'yocto-compat-layer' -CASES_PATHS = [os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'lib', 'compatlayer', 'cases')] -logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout) - -def test_layer_compatibility(td, layer): - from compatlayer.context import CompatLayerTestContext - logger.info("Starting to analyze: %s" % layer['name']) - logger.info("----------------------------------------------------------------------") - - tc = CompatLayerTestContext(td=td, logger=logger, layer=layer) - tc.loadTests(CASES_PATHS) - return tc.runTests() - -def main(): - parser = argparse.ArgumentParser( - description="Yocto Project compatibility layer tool", - add_help=False) - parser.add_argument('layers', metavar='LAYER_DIR', nargs='+', - help='Layer to test compatibility with Yocto Project') - parser.add_argument('-o', '--output-log', - help='File to output log (optional)', action='store') - parser.add_argument('--dependency', nargs="+", - help='Layers to process for dependencies', action='store') - parser.add_argument('--machines', nargs="+", - help='List of MACHINEs to be used during testing', action='store') - parser.add_argument('--additional-layers', nargs="+", - help='List of additional layers to add during testing', action='store') - parser.add_argument('-n', '--no-auto', help='Disable auto layer discovery', - action='store_true') - parser.add_argument('-d', '--debug', help='Enable debug output', - action='store_true') - parser.add_argument('-q', '--quiet', help='Print only errors', - action='store_true') - - parser.add_argument('-h', '--help', action='help', - default=argparse.SUPPRESS, - help='show this help message and exit') - - args = parser.parse_args() - - if args.output_log: - fh = logging.FileHandler(args.output_log) - fh.setFormatter(logging.Formatter("%(levelname)s: %(message)s")) - logger.addHandler(fh) - if args.debug: - logger.setLevel(logging.DEBUG) - elif args.quiet: - logger.setLevel(logging.ERROR) - - if not 'BUILDDIR' in os.environ: - logger.error("You must source the environment before run this script.") - logger.error("$ source oe-init-build-env") - return 1 - builddir = os.environ['BUILDDIR'] - bblayersconf = os.path.join(builddir, 'conf', 'bblayers.conf') - - layers = detect_layers(args.layers, 
args.no_auto) - if not layers: - logger.error("Fail to detect layers") - return 1 - if args.additional_layers: - additional_layers = detect_layers(args.additional_layers, args.no_auto) - else: - additional_layers = [] - if args.dependency: - dep_layers = detect_layers(args.dependency, args.no_auto) - dep_layers = dep_layers + layers - else: - dep_layers = layers - - logger.info("Detected layers:") - for layer in layers: - if layer['type'] == LayerType.ERROR_BSP_DISTRO: - logger.error("%s: Can't be DISTRO and BSP type at the same time."\ - " The conf/distro and conf/machine folders was found."\ - % layer['name']) - layers.remove(layer) - elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF: - logger.error("%s: Don't have conf/layer.conf file."\ - % layer['name']) - layers.remove(layer) - else: - logger.info("%s: %s, %s" % (layer['name'], layer['type'], - layer['path'])) - if not layers: - return 1 - - shutil.copyfile(bblayersconf, bblayersconf + '.backup') - def cleanup_bblayers(signum, frame): - shutil.copyfile(bblayersconf + '.backup', bblayersconf) - os.unlink(bblayersconf + '.backup') - signal.signal(signal.SIGTERM, cleanup_bblayers) - signal.signal(signal.SIGINT, cleanup_bblayers) - - td = {} - results = collections.OrderedDict() - results_status = collections.OrderedDict() - - layers_tested = 0 - for layer in layers: - if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \ - layer['type'] == LayerType.ERROR_BSP_DISTRO: - continue - - logger.info('') - logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'], - layer['path'])) - - shutil.copyfile(bblayersconf + '.backup', bblayersconf) - - missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) - if not missing_dependencies: - for additional_layer in additional_layers: - if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger): - missing_dependencies = True - break - if not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) or \ - any(map(lambda additional_layer: not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger), - additional_layers)): - logger.info('Skipping %s due to missing dependencies.' % layer['name']) - results[layer['name']] = None - results_status[layer['name']] = 'SKIPPED (Missing dependencies)' - layers_tested = layers_tested + 1 - continue - - if any(map(lambda additional_layer: not add_layer(bblayersconf, additional_layer, dep_layers, logger), - additional_layers)): - logger.info('Skipping %s due to missing additional layers.' % layer['name']) - results[layer['name']] = None - results_status[layer['name']] = 'SKIPPED (Missing additional layers)' - layers_tested = layers_tested + 1 - continue - - logger.info('Getting initial bitbake variables ...') - td['bbvars'] = get_bb_vars() - logger.info('Getting initial signatures ...') - td['builddir'] = builddir - td['sigs'], td['tunetasks'] = get_signatures(td['builddir']) - td['machines'] = args.machines - - if not add_layer(bblayersconf, layer, dep_layers, logger): - logger.info('Skipping %s ???.' 
% layer['name']) - results[layer['name']] = None - results_status[layer['name']] = 'SKIPPED (Unknown)' - layers_tested = layers_tested + 1 - continue - - result = test_layer_compatibility(td, layer) - results[layer['name']] = result - results_status[layer['name']] = 'PASS' if results[layer['name']].wasSuccessful() else 'FAIL' - layers_tested = layers_tested + 1 - - if layers_tested: - logger.info('') - logger.info('Summary of results:') - logger.info('') - for layer_name in results_status: - logger.info('%s ... %s' % (layer_name, results_status[layer_name])) - - cleanup_bblayers(None, None) - - return 0 - -if __name__ == '__main__': - try: - ret = main() - except Exception: - ret = 1 - import traceback - traceback.print_exc() - sys.exit(ret) From 883b3ce527583393e6185af3b2a9ae3c2d5e48df Mon Sep 17 00:00:00 2001 From: Olev Kartau Date: Wed, 12 Jul 2017 08:24:21 +0300 Subject: [PATCH 14/21] CI Jenkinsfile: fix xml-reports publish and storing Recent change of test stages accidentally left out some pre- and post-build tests xml-reports handling. Signed-off-by: Olev Kartau --- Jenkinsfile | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 687a629871..ef885ca574 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -92,9 +92,6 @@ try { } } } // docker_image - archiveArtifacts allowEmptyArchive: true, - artifacts: 'build*/TestResults_*/TEST-*.xml' - step_xunit('build*/TestResults_*/TEST-*.xml') tester_script = readFile "docker/tester-exec.sh" tester_summary = readFile "docker/tester-create-summary.sh" qemu_script = readFile "docker/run-qemu.exp" @@ -112,6 +109,16 @@ try { params = ["${script_env}", "docker/post-build.sh"].join("\n") sh "${params}" } + // note wildcard: handle pre-build reports in build.pre/ as well + lock(resource: "global_data") { + summary += sh(returnStdout: true, + script: "docker/tester-create-summary.sh 'oe-selftest: post-build' '' build/TestResults_*/TEST- 0") + archiveArtifacts allowEmptyArchive: true, + artifacts: 'build*/TestResults_*/TEST-*.xml' + } + lock(resource: "step-xunit") { + step_xunit('build*/TestResults_*/TEST-*.xml') + } } } } From 74dc45ae2945f12c1307bb82871fc8a7ef1f21a4 Mon Sep 17 00:00:00 2001 From: Olev Kartau Date: Wed, 12 Jul 2017 12:02:01 +0300 Subject: [PATCH 15/21] CI: publish sstate after post-build tests as well We previously published sstate only after main build, but post-build testing can run faster if it uses sstate. 
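For context, a build that wants to reuse the cache published this way would
typically point BitBake at it via SSTATE_MIRRORS in its local.conf. A minimal
sketch (the mirror path below is a placeholder; the actual location used by
this CI is configured outside this patch):

    SSTATE_MIRRORS ?= "file://.* file:///path/to/bb-cache/sstate/PATH"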
Signed-off-by: Olev Kartau
---
 Jenkinsfile               |  6 +++++-
 docker/publish-project.sh | 11 -----------
 docker/publish-sstate.sh  | 26 ++++++++++++++++++++++++++
 3 files changed, 31 insertions(+), 12 deletions(-)
 create mode 100755 docker/publish-sstate.sh

diff --git a/Jenkinsfile b/Jenkinsfile
index ef885ca574..60aeadf4ec 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -86,8 +86,10 @@ try {
                     throw e
                 } finally {
                     set_gh_status_pending(is_pr, 'Store images')
-                    params = ["${script_env}", "docker/publish-project.sh"].join("\n")
                     stage('Store images') {
+                        params = ["${script_env}", "docker/publish-project.sh"].join("\n")
+                        sh "${params}"
+                        params = ["${script_env}", "docker/publish-sstate.sh"].join("\n")
                         sh "${params}"
                     }
                 }
@@ -108,6 +110,8 @@ try {
             docker.image(image_name).inside(run_args) {
                 params = ["${script_env}", "docker/post-build.sh"].join("\n")
                 sh "${params}"
+                params = ["${script_env}", "docker/publish-sstate.sh"].join("\n")
+                sh "${params}"
             }
             // note wildcard: handle pre-build reports in build.pre/ as well
             lock(resource: "global_data") {
diff --git a/docker/publish-project.sh b/docker/publish-project.sh
index 217a999ea0..af8ecee362 100755
--- a/docker/publish-project.sh
+++ b/docker/publish-project.sh
@@ -114,17 +114,6 @@ if [ -f "${LOG}" ]; then
   rsync -avz ${LOG}* ${_RSYNC_DEST}/
 fi
 
-if [ -d sstate-cache ]; then
-  if [ ! -z ${BUILD_CACHE_DIR+x} ]; then
-    if [ -d ${BUILD_CACHE_DIR}/sstate ]; then
-      # populate shared sstate from local sstate:
-      _src=sstate-cache
-      _dst=${RSYNC_PUBLISH_DIR}/bb-cache/sstate
-      find ${_src} -mindepth 1 -maxdepth 1 -type d -exec rsync -a --ignore-existing {} ${_dst}/ \;
-    fi
-  fi
-fi
-
 ## for debugging signatures: publish stamps
 if [ -d ${_BRESULT}/stamps ]; then
   create_remote_dirs ${_RSYNC_DEST} .stamps/${TARGET_MACHINE}/
diff --git a/docker/publish-sstate.sh b/docker/publish-sstate.sh
new file mode 100755
index 0000000000..146c3dd3d4
--- /dev/null
+++ b/docker/publish-sstate.sh
@@ -0,0 +1,26 @@
+#!/bin/sh -xeu
+#
+# publish-sstate.sh: Publish local sstate into global sstate
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+cd $WORKSPACE/build
+
+if [ -d sstate-cache ]; then
+  if [ ! -z ${BUILD_CACHE_DIR+x} ]; then
+    if [ -d ${BUILD_CACHE_DIR}/sstate ]; then
+      # populate shared sstate from local sstate, show names for traceability
+      _src=sstate-cache
+      _dst=${RSYNC_PUBLISH_DIR}/bb-cache/sstate
+      find ${_src} -mindepth 1 -maxdepth 1 -type d -exec rsync -a --info=name --ignore-existing {} ${_dst}/ \;
+    fi
+  fi
+fi

From cdade5730373b7da373ed0bc6471404fb5105025 Mon Sep 17 00:00:00 2001
From: Krisztian Litkey
Date: Mon, 10 Jul 2017 15:41:50 +0300
Subject: [PATCH 16/21] doc: added doc/howtos/OSTree.rst, updated doc/system-update.rst.

Added a brief initial HOWTO about system updates using OSTree. Also
updated the system update documentation with a pointer to this new
HOWTO.
Signed-off-by: Krisztian Litkey
---
 doc/howtos/OSTree.rst | 145 ++++++++++++++++++++++++++++++++++++++++++
 doc/system-update.rst |  25 ++++----
 2 files changed, 159 insertions(+), 11 deletions(-)
 create mode 100644 doc/howtos/OSTree.rst

diff --git a/doc/howtos/OSTree.rst b/doc/howtos/OSTree.rst
new file mode 100644
index 0000000000..8f6ff01ce2
--- /dev/null
+++ b/doc/howtos/OSTree.rst
@@ -0,0 +1,145 @@
+Handling System Updates Using OSTree
+####################################
+
+IoT Reference OS Kit ('refkit') has support for updating devices running
+one of the refkit profile images using an HTTP/HTTPS server and OSTree.
+OSTree is a library and suite of command line tools, largely inspired by
+git, that provides version control and a distribution mechanism for bootable
+OS filesystem trees, or other binaries. For a comprehensive introduction
+to OSTree, an overview of its architecture and feature set, please refer
+to its `documentation <https://ostree.readthedocs.io/>`_.
+
+If enabled, OSTree support in refkit
+
+  * provides A/B-versioning (within a single block device partition)
+  * creates a per-image OSTree repository for each image built
+  * exposes builds in a common OSTree repository as a series of commits
+  * can sign each commit (which is then verified during updates)
+  * can provide a service for automatic image updates
+
+
+Enabling OSTree Support
+#######################
+
+To enable end-to-end OSTree support with automatic image updates, you will
+need to
+
+  * enable OSTree support for your builds
+  * ideally generate and use a pair of signing keys for your builds/updates
+  * expose the OSTree repository of your builds over HTTP/HTTPS
+  * point your clients to the exposed OSTree repository
+
+To enable OSTree support, turn on the 'ostree' image feature. You can
+do this by including the following configuration snippet in your local.conf
+or other suitable global configuration file::
+
+  REFKIT_IMAGE_EXTRA_FEATURES += "ostree"
+
+To use your GPG signing key pair for signing and verifying the OSTree
+repository commits, assuming your keys are in the keyrings in
+with key ID release@example.org, add the following to your local.conf or
+other suitable global Yocto configuration file::
+
+    OSTREE_GPGDIR = ""
+    OSTREE_GPGID = "release@example.org"
+
+Assuming you want to use your build machine, build.example.org, in this
+example also as your update server, you can readily point the image for
+updates to your update/build server by adding the following to your
+local.conf or other suitable global configuration file::
+
+    OSTREE_REMOTE = "http://build.example.org/ostree/"
+
+You can also use HTTPS instead of HTTP if you want to and your server is
+properly configured for serving HTTPS requests.
+
+Next you need to expose the OSTree repository your builds are exported to
+over HTTP/HTTPS for clients to consume. By default this repository is
+located in build/tmp-glibc/deploy/ostree-repo, but you can change this
+location by adding the following to your local.conf or other suitable
+global configuration file::
+
+    OSTREE_EXPORT = ""
+
+Now assuming you did not change the location, and you use Apache for
+serving HTTP/HTTPS requests, you can expose this repository with Apache
+by adding the following to your Apache configuration::
+
+    Alias "/ostree/" "/build/tmp-glibc/deploy/ostree-repo/"
+
+    <Directory /build/tmp-glibc/deploy/ostree-repo>
+        Options Indexes FollowSymLinks
+        Require all granted
+    </Directory>
+
+Finally you should restart (or start) your Apache server to activate the
+configuration changes.
This might be a good time to also make sure that +any firewall rules you might have will allow your clients access to the +HTTP port of the server. + +Another alternative is to use the built-in trivial HTTP server in ostree +available as the *ostree trivial-httpd* command (if it is enabled at +compile time). With that you could serve out the repository with the +following commands:: + + cd build/tmp-glibc/deploy + ln -sf ostree-repo ostree + ostree trivial-httpd --port 80 + +A third alternative is use a simple Python HTTP server, for instance the +one from the project at:: + + http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/meta/lib/oeqa/utils/httpserver.py + +which is also available in the refkit source tree as:: + + openembedded-core/meta/lib/oeqa/utils/httpserver.py + +Now with teh above configuration in place, and an HTTP server running, +subsequent builds should get automatically exported and pulled in as +updates by the clients running one of your refkit images. + + +Disabling Automatic Updates +########################### + +If you prefer not to pull in updates automatically to the clients, disable +the refkit-update systemd service. You can do this by running the following +command on the client device:: + + systemctl stop refkit-update.service + systemctl disable refkit-update.service + + +Pulling In Updates Manually +########################### + +If you want to manually pull any potentially available updates, you can do +so by running the following command on a client device:: + + refkit-ostree-update --one-shot + +This will check the server for available updates, pull in any such one, +and request a reboot to activate the changes if the update was successfully +installed. + + +Preventing/Delaying Automatic Reboot +#################################### + +Note that by default after an update has been installed the system will be +rebooted to activate the latest changes. Any entity that needs to prevent +or delay the reboot to a more convenient time in the future should use +systemd-inhibit or the corresponding systemd(-logind) interfaces for doing +so. + +For instance, if you have an interactive shell (or a login session via ssh) +while the updater is running, or you are running it yourself manually, and +you don't want the system to get rebooted under you in case an update does +get pulled in, you should do a:: + + systemd-inhibit --what=shutdown $SHELL + +Once you're done with whatever you were doing and want to allow any pending +updates to proceed to reboot, you can simply exit the innermost shell. + diff --git a/doc/system-update.rst b/doc/system-update.rst index d8ccfc31ce..e446b54820 100644 --- a/doc/system-update.rst +++ b/doc/system-update.rst @@ -91,9 +91,10 @@ OSTree tool working in the running OS, the initramfs also overrides :file:`/proc/cmdline` with a version that has the expected ``ostree`` boot parameter. -``refkit-ostree_git.bb`` - Some on-target helper script, partly used by the initramfs and partly - used as wrapper around the actual :command:`ostree`. +``refkit-ostree.bb`` + On-target helper scripts and binaries. Partly (/usr/bin/refkit-ostree) used + by the initramfs ostree module (/etc/init.d/91-ostree), the rest is and is + used by the refkit ostree updater (/usr/bin/refkit-ostree-update). ``ostree-image.bbclass`` This helper class gets inherited automatically by ``refkit-image.bbclass`` @@ -117,7 +118,8 @@ OSTree Usage See the comments in ``ostree-image.bbclass`` for instructions on how to configure the image creation. 
In particular, image signing and publishing the permanent OSTree repository require some planning and -customization. +customization. Also check the :file:`howtos/OSTree.rst` for more instructions +and tips on how to use OSTree for system updates. In development images, the default is to use a generated GPG key from :file:`tmp-glibc/deploy/gnupg/` and a "permanent" OSTree repository in @@ -144,13 +146,14 @@ into the release process for a product. .. deltas: https://ostree.readthedocs.io/en/latest/manual/repository-management/#derived-data-static-deltas-and-the-summary-file -Once a device has booted into an OSTree-enabled image, the -:command:`ostree` command can be used as usual. Updates are configured -in :file:`/ostree/repo/config` to pull new OS releases from the -``OSTREE_REMOTE`` URL that was set at build time. - -Beware that system updates should be done with :command:`refkit-ostree -update`, because that will also update the UEFI combo app. +Once a device has booted into an OSTree-enabled image, if the rekit-update +service has been enabled and running (which it is by default), any updates +should get automatically pulled in and activated by a reboot of the device. +If manual updates are preferred, the refkit-update service should be stopped +and/or disabled. Manual updates can be triggered by running +:command:`refkit-ostree-update --one-shot`. Updates are configured in +:file:`/ostree/repo/config` to pull new OS releases from the ``OSTREE_REMOTE`` +URL that was set at build time. OSTree Filesystem ----------------- From 98f325cf2b1c37dcbb0f84994dee9883f52c1994 Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Sun, 9 Jul 2017 18:53:55 +0300 Subject: [PATCH 17/21] meta-refkit-core: use refkit-ostree-update in ostree update selftest. Use the new C-based refkit-ostree-update implementation in the ostree update test cases. Signed-off-by: Krisztian Litkey --- .../lib/oeqa/selftest/cases/refkit_ostree.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py b/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py index 705bc8a27d..561391b45e 100644 --- a/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py +++ b/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py @@ -99,7 +99,14 @@ def create_httpd(): #exec socat 2>>/tmp/ostree.log -D -v -d -d -d -d STDIO TCP:localhost:%d ''' % port) - cmd = '''ostree config set 'remote "updates".url' http://%s && refkit-ostree update''' % self.OSTREE_SERVER + # Use the updater, refkit-ostree-update, in a one-shot mode + # attempting just a single update cycle for the test case. + # Also override the post-apply hook to only run the UEFI app + # update hook. It is a bit of a hack but we don't want the rest + # of the hooks run, especially not the reboot hook, to avoid + # prematurely rebooting the qemu instance and this is the easiest + # way to achieve just that for now. + cmd = '''ostree config set 'remote "updates".url' http://%s && refkit-ostree-update --one-shot --post-apply-hook /usr/share/refkit-ostree/hooks/post-apply.d/00-update-uefi-app''' % self.OSTREE_SERVER status, output = qemu.run_serial(cmd, timeout=600) self.assertEqual(1, status, 'Failed to run command "%s":\n%s' % (cmd, output)) self.logger.info('Successful (?) 
update:\n%s' % output) From 1472b49cb48221ae366566a1f88fdc2d6bbd0b91 Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Fri, 7 Jul 2017 17:19:53 +0300 Subject: [PATCH 18/21] meta-refkit-core: add GPG keyring with pregenerated keys to the repo. Add a GPG home directory/keyring with pregenerated development mode signing keys to the repository. Also added a minimal script to generate new signing keys. For example, you can run this command in the top-level intel-iot-refkit directory ./meta-refkit-core/files/gnupg/generate-keys.sh \ $(pwd)/gpg-home release@example.org to generate a new key pair (with the id release@example.org), create a new gpg1 keyring in gpg-home, import the keys there and leave them as release@example.org.{pub,sec,cfg} in gpg-home. You can then set OSTREE_GPGDIR (and/or FLATPAK_GPGDIR) to the path of the newly generated gpg-home directory and set OSTREE_GPGID (and/or FLATPAK_GPGID) to release@example.org to use the newly generated keys to sign your base OS (and/or flatpak) ostree repository commits. Signed-off-by: Krisztian Litkey --- meta-refkit-core/files/gnupg/generate-keys.sh | 41 ++++++++++++++++++ meta-refkit-core/files/gnupg/pubring.gpg | Bin 0 -> 1622 bytes meta-refkit-core/files/gnupg/pubring.gpg~ | 0 .../gnupg/refkit-development-signing@key.pub | Bin 0 -> 1614 bytes .../gnupg/refkit-development-signing@key.sec | Bin 0 -> 1699 bytes meta-refkit-core/files/gnupg/secring.gpg | Bin 0 -> 1707 bytes meta-refkit-core/files/gnupg/trustdb.gpg | Bin 0 -> 1200 bytes 7 files changed, 41 insertions(+) create mode 100755 meta-refkit-core/files/gnupg/generate-keys.sh create mode 100644 meta-refkit-core/files/gnupg/pubring.gpg create mode 100644 meta-refkit-core/files/gnupg/pubring.gpg~ create mode 100644 meta-refkit-core/files/gnupg/refkit-development-signing@key.pub create mode 100644 meta-refkit-core/files/gnupg/refkit-development-signing@key.sec create mode 100644 meta-refkit-core/files/gnupg/secring.gpg create mode 100644 meta-refkit-core/files/gnupg/trustdb.gpg diff --git a/meta-refkit-core/files/gnupg/generate-keys.sh b/meta-refkit-core/files/gnupg/generate-keys.sh new file mode 100755 index 0000000000..3801bbaf4a --- /dev/null +++ b/meta-refkit-core/files/gnupg/generate-keys.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +print_usage () { + if [ -n "$1" ]; then + echo "$1" + fi + + echo "$0 generates a DSA GPG key-pair without password protection," + echo "suitable for signing commits in an flatpak/ostree repository." + echo "" + echo "usage: $0 gpg-homedir key-id" + echo "" + echo "For example, running" + echo "" + echo " $0 \$(pwd)/gpg-home release@example.org" + echo "" + echo "generates a GPG key-pair and imports the keys into the GPG keyrings" + echo "in \$(pwd)/gpg-home. It also leaves the exported key pair, and the" + echo "gpg1 batch mode configuration file used to generate the keys as" + echo "" + echo " release@example.org.pub," + echo " release@example.org.sec, and" + echo " release@example.org.cfg" + echo "" + echo "in \$(pwd)/gpg-home." + echo "" + echo "You need gpg1 installed to use this script." 
+ + exit ${2:-1} +} + +if [ "$1" = "-h" -o "$1" = "--help" ]; then + print_usage "" 0 +fi + +if [ "$#" != "2" ]; then + print_usage "invalid command line \"$*\"" 1 +fi + +${0%/*}/../../../meta-flatpak/scripts/gpg-keygen.sh \ + --home "$1" --id "$2" --pub "$1/$2.pub" --sec "$1/$2.sec" diff --git a/meta-refkit-core/files/gnupg/pubring.gpg b/meta-refkit-core/files/gnupg/pubring.gpg new file mode 100644 index 0000000000000000000000000000000000000000..21651e02138e205eb90b934d32d7e1f6d5a8001f GIT binary patch literal 1622 zcmV-c2C4a(11KpE*J>4ZDCT-+SedFSh|$@}Et@aqG>X`v)O6 zbK1@yQQ;Wdu8PKMN`u+Q7fUwO!{4rmUk@{0N|SvPZtA-}$R_jwVs{J@o%@i@sm*QTC z^~DWZ>|jJ6qe8?q`fC9I-@0`Lg?I)wx*1t$-URZR&`1)o4hNW7X2tkrl%}8&2msD# zGH77~y105YB(lkH(Ys*1F{#6(<&-3`q@DGe;n^c=xu@rg_w{W*0m%hSF^MW#b3P&c zs%0AFbHmrw9pm@K(?Z5_397XD7#p-0X!RZ^^tED1i95cL1xySyu5AfdSwj}>l2e7M zS+0Ibr|9zw<8^bG70iKE;b(N*)+>%YV$@YT{P!%S{$^{TDJ2V-QdruFZasc$N$4v| zj{LZcEj!-QQi=3*ushm~Mpo^^QiHb*%M;ijC}@(6-`DX{>1M!p+Vl2u2XJNw&M0dx z4RjSO=sJGT==P-v<&|YTy2b%r6?b=(zxj^K7(wC_p6?uAX832~ImO9dVG#FW#B zJ~OJRm)dN;YD16Et3|xK$bdE?b@*h+4EaBJ@*k z9nX=5oLuXkO+PuNPz#ncwF_ZnvdsIug4~n3q8MxR;R;r$71jxo$z(4p6^hg?zP-f| zZhs-T-oSxd0}NhL)zs;9>ddKDo)_?W(Yt(fqT=>+oFS!w4ZBka-WlC(v@BC;XKrb3 zXCO;uc_2J;WoBz>bS-3Mc4cgDaBXF7bS-mfXKrb3XFzLZc|M4G1QQVm03rnfSykuu z0vjU+3ke7Z0|EvW2m%QT3j`Jd0|5da0Rk6*0162ZgWIk)nl=0{*Vh35Xd;|g$Jx3U zOE1lZa8nh5NK}Nh$djdFzEbP=Vh$?n0RX8lV}$r!=Jw)-R_@8l@rVulhadhTS5;=3X_Wh|da&%f-`DP-1Zb3S;5>7G^g zTa;R<`>}P8pEn-|T1t$v%Mf^tR-SS3NFro>`2TD(ELXi>r$Z&+7qg~JX3ux*nD=W1 zL`(F)LW8MT;a$bS#v^!$!7Xy-J;%y zqaHC_;P*{Y(;WNe0PZGCoo)BW1gVsqJ4DO5m~{hE{>z zOs+@`%9gbL@ga-tRy9U+$#v6qf-%WKDgj$CPwSOhoUOUo3HUudMNNHK-#PjnAwjK^ zg^=P0JX4Zi(&5Y(<=ZBQrvL*72mpeif<9@vOyUiGr;5XaMYIjaSs}1k)uSapoQq-&Q&5vW9ZIIuFES!3VtiQBHqD7j#PAP$|wY@^w7{oUrCq<*?JlhR*RnA9k2 zmjv(5HT>eSSR!!Jt6f<~KBcw|_anQ;o=r?5Q#F>UED z4?+S2X$>Re)b@*V8YqxfS>5Bj!mU3lAX+Q?4A-9JS|m2Mn7Qln8fPW`IZh|pz!_3* zwt~7c&&FA}Obsq>sWisbfW|+LJt9p5XV5p(Wv&jp#UiAgcBfUZ*+A+~IdW(HMWhYR zaiD`@fGYsmDUS0AQ7CL4y0wU51Q-zr00{*GSykuu0vikf3JDN{+pad6HT*BA5CH#1 zMHBN~E}WzJOA(wOUr?3J51Jw)tV*mgE literal 0 HcmV?d00001 diff --git a/meta-refkit-core/files/gnupg/pubring.gpg~ b/meta-refkit-core/files/gnupg/pubring.gpg~ new file mode 100644 index 0000000000..e69de29bb2 diff --git a/meta-refkit-core/files/gnupg/refkit-development-signing@key.pub b/meta-refkit-core/files/gnupg/refkit-development-signing@key.pub new file mode 100644 index 0000000000000000000000000000000000000000..d0b1f5d88a24e92385434b6f611157021fc8de4c GIT binary patch literal 1614 zcmV-U2C?~>11KpE*J>4ZDCT-+SedFSh|$@}Et@aqG>X`v)O6 zbK1@yQQ;Wdu8PKMN`u+Q7fUwO!{4rmUk@{0N|SvPZtA-}$R_jwVs{J@o%@i@sm*QTC z^~DWZ>|jJ6qe8?q`fC9I-@0`Lg?I)wx*1t$-URZR&`1)o4hNW7X2tkrl%}8&2msD# zGH77~y105YB(lkH(Ys*1F{#6(<&-3`q@DGe;n^c=xu@rg_w{W*0m%hSF^MW#b3P&c zs%0AFbHmrw9pm@K(?Z5_397XD7#p-0X!RZ^^tED1i95cL1xySyu5AfdSwj}>l2e7M zS+0Ibr|9zw<8^bG70iKE;b(N*)+>%YV$@YT{P!%S{$^{TDJ2V-QdruFZasc$N$4v| zj{LZcEj!-QQi=3*ushm~Mpo^^QiHb*%M;ijC}@(6-`DX{>1M!p+Vl2u2XJNw&M0dx z4RjSO=sJGT==P-v<&|YTy2b%r6?b=(zxj^K7(wC_p6?uAX832~ImO9dVG#FW#B zJ~OJRm)dN;YD16Et3|xK$bdE?b@*h+4EaBJ@*k z9nX=5oLuXkO+PuNPz#ncwF_ZnvdsIug4~n3q8MxR;R;r$71jxo$z(4p6^hg?zP-f| zZhs-T-oSxd0}NhL)zs;9>ddKDo)_?W(Yt(fqT=>+oFS!w4ZBka-WlC(v@BC;XKrb3 zXCO;uc_2J;WoBz>bS-3Mc4cgDaBXF7bS-mfXKrb3XFzLZc|M4G1QQVm03rnfSykuu z0vjU+3ke7Z0|EvW2m%QT3j`Jd0|5da0Rk6*0162ZgWIk)nl=0{*Vh35Xd;|g$Jx3U zOE1lZa8nh5NK}Nh$djdFzEbP=Vh$?n0RX8lV}$r!=Jw)-R_@8l@rVulhadhTS5;=3X_Wh|da&%f-`DP-1Zb3S;5>7G^gTa;R< z`>}P8pEn-|T1t$v%Mf^tR-SS3NFro>`2TD(ELXi>r$Z&+7qg~JX3ux*nD=W1L`(F) zLW8MT;a$bS#v^!$!7Xy-J;%yqaHC< 
z?-2pZWpPI}98$C8yS-eq>_;P*{Y(;WNe0PZGCoo)BW1gVsqJ4DO5m~{hE{>zOs+@` z%9gbL@ga-tRy9U+$#v6qf-%WKDgj$CPwSOhoUOUo3HUudMNNHK-#PjnAwjK^g^=P0 zJX4Zi(&5Y(<=ZBQrvL*72mpeif<9@vOyUiGr;5XaMYIjaSs}1k)uSapoQ zq-&Q&5vW9ZIIuFES!3VtiQBHqD7j#PAP$|wY@^w7{oUrCq<*?JlhR*RnA9k2mjv(5 zHT>eSSR!!Jt6f<~KBcw|_anQ;o=r?5Q#F>UED4?+S2 zX$>Re)b@*V8YqxfS>5Bj!mU3lAX+Q?4A-9JS|m2Mn7Qln8fPW`IZh|pz!_3*wt~7c z&&FA}Obsq>sWisbfW|+LJt9p5XV5p(Wv&jp#UiAgcBfUZ*+A+~IdW(HMWhYRaiD`@ zfGYsmDUS0AQ7CL4y0wU51Q-zr00{*GSykuu0vikf3JDN{+pad6HT*BA5CH#1MHBN~ zE}WzJOA(wOUr|Gl%`2YX_ literal 0 HcmV?d00001 diff --git a/meta-refkit-core/files/gnupg/refkit-development-signing@key.sec b/meta-refkit-core/files/gnupg/refkit-development-signing@key.sec new file mode 100644 index 0000000000000000000000000000000000000000..d6910ecdacb7f71a6e1f532985acd2381918879c GIT binary patch literal 1699 zcmV;U23+}-15*T9Rp<5*2ms<*Ees>KpE*J>4ZDCT-+SedFSh|$@}Et@aqG>X`v)O6 zbK1@yQQ;Wdu8PKMN`u+Q7fUwO!{4rmUk@{0N|SvPZtA-}$R_jwVs{J@o%@i@sm*QTC z^~DWZ>|jJ6qe8?q`fC9I-@0`Lg?I)wx*1t$-URZR&`1)o4hNW7X2tkrl%}8&2msD# zGH77~y105YB(lkH(Ys*1F{#6(<&-3`q@DGe;n^c=xu@rg_w{W*0m%hSF^MW#b3P&c zs%0AFbHmrw9pm@K(?Z5_397XD7#p-0X!RZ^^tED1i95cL1xySyu5AfdSwj}>l2e7M zS+0Ibr|9zw<8^bG70iKE;b(N*)+>%YV$@YT{P!%S{$^{TDJ2V-QdruFZasc$N$4v| zj{LZcEj!-QQi=3*ushm~Mpo^^QiHb*%M;ijC}@(6-`DX{>1M!p+Vl2u2XJNw&M0dx z4RjSO=sJGT==P-v<&|YTy2b%r6?b=(zxj^K7(wC_p6?uAX832~ImO9dVG#FW#B zJ~OJRm)dN;YD16Et3|xK$bdE?b@*h+4EaBJ@*k z9nX=5oLuXkO+PuNPz#ncwF_ZnvdsIug4~n3q8MxR;R;r$71jxo$z(4p6^hg?zP-f| zZhs-T-oSxd0}NhL)zs;9>ddKDo)_?W(Yt(fqT=>+oFS!w4ZBka-WlC(0090jd(jre zq20;#Ji2WVIFLqy!6^<0yDCTZN_hXM7Ge<(qqHnjX=iR}Zf77%WqBYxa%E;~X>=`Q zWp-t3Z*Xm8ZgeekX=iR}Zf8JiWqCe`dIS>@2mm4l0$Ek(_5vFt1`7!Y2Ll2I6$kX|9fk;$@w8)dC zV!l%A_hJqz>j40%FJpxGUFP=UhF0#$%JGN|{D&X@B3D&rRpVg}2y?id0zCv-Rp<5) z2mqv~pbf;-#Ox5TOl2&eNYB6Q(&e!h3TGE_FI%%sr#{YkDoUm2U<#uvda*7 zjaHs<@kk2qze0nlSmLvECcfOq`52U1 ziv;!&Q4eeS_o)8@9>)$J&Sq0`_*rv8gvn<5X5FIRhoc@bR__r3%Vlv#H5^j2<-5IH zv+PGBnEgx=Zb=5oUot*YEF)#Rg{kdd7E0i<9)?zd-b}7Y4a%0Z{_!D;?N&8Lbjfwo zc7idHMO9U(!jlZBAt2Ru`fU((^s7vey57VgGICr$5|n;Ske(dN{B$ompew z?}^*4$|$*Dfglc@!)&A3a{b-ro}_-dHIvd`RhZN$Y?lP@&Nckvu~;LTbvgN|x4A6x zqMC&EgU;z4Aj2<8$bv?jQFvrnnVE44->0xV|1oXpFAqWj1ZfQ;yTu};opz^Huh~HAPdRdD{Y9h=&T*iFVt^|E*(r|m2~j9)9=f#v z0aOhhyW`ST<_~<^crUCS_h*^)y&-P${v1`0RcDD+VNac;v-^}iEET*EHx+k?VFVZv z2mlEM0$Ek(_5vFW0162ZgWIk)nl=0{s1N}F&bS$XF!kydl!YK;o!&WWlhU*#xcldf tT>Tze&$2^i0RAxwaw@~4dPZBQydXA=Cxfav9?m99UxgaFr?2RKpE*J>4ZDCT-+SedFSh|$@}Et@aqG>X`v)O6 zbK1@yQQ;Wdu8PKMN`u+Q7fUwO!{4rmUk@{0N|SvPZtA-}$R_jwVs{J@o%@i@sm*QTC z^~DWZ>|jJ6qe8?q`fC9I-@0`Lg?I)wx*1t$-URZR&`1)o4hNW7X2tkrl%}8&2msD# zGH77~y105YB(lkH(Ys*1F{#6(<&-3`q@DGe;n^c=xu@rg_w{W*0m%hSF^MW#b3P&c zs%0AFbHmrw9pm@K(?Z5_397XD7#p-0X!RZ^^tED1i95cL1xySyu5AfdSwj}>l2e7M zS+0Ibr|9zw<8^bG70iKE;b(N*)+>%YV$@YT{P!%S{$^{TDJ2V-QdruFZasc$N$4v| zj{LZcEj!-QQi=3*ushm~Mpo^^QiHb*%M;ijC}@(6-`DX{>1M!p+Vl2u2XJNw&M0dx z4RjSO=sJGT==P-v<&|YTy2b%r6?b=(zxj^K7(wC_p6?uAX832~ImO9dVG#FW#B zJ~OJRm)dN;YD16Et3|xK$bdE?b@*h+4EaBJ@*k z9nX=5oLuXkO+PuNPz#ncwF_ZnvdsIug4~n3q8MxR;R;r$71jxo$z(4p6^hg?zP-f| zZhs-T-oSxd0}NhL)zs;9>ddKDo)_?W(Yt(fqT=>+oFS!w4ZBka-WlC(0090jd(jre zq20;#Ji2WVIFLqy!6^<0yDCTZN_hXM7Ge<(qqHnjX=iR}Zf77%WqBYxa%E;~X>=`Q zWp-t3Z*Xm8ZgeekX=iR}Zf8JiWqCe`dIS>@2mm4l0$Ek(_5vFt1`7!Y2Ll2I6$kX|9fk;$@w8)dC zV!l%A_hJqz>j40%FJpxGUFP=UhF0#$%JGN|{D&X@B3D&rRpVg}2y?iw0ssJ=0zCv- zRp<5)2mqv~pbf;-#Ox5TOl2&eNYB6Q(&e!h3TGE_FI%%sr#{YkDoUm2U<#u zvda*7jaHs<@kk2qze0nlSmLvECcfOq z`52U1iv;!&Q4eeS_o)8@9>)$J&Sq0`_*rv8gvn<5X5FIRhoc@bR__r3%Vlv#H5^j2 z<-5IHv+PGBnEgx=Zb=5oUot*YEF)#Rg{kdd7E0i<9)?zd-b}7Y4a%0Z{_!D;?N&8L 
zbjfwoc7idHMO9U(!jlZBAt2Ru`fU((^s7vey57VgGICr$5|n;Ske(dN{B$ zompew?}^*4$|$*Dfglc@!)&A3a{b-ro}_-dHIvd`RhZN$Y?lP@&Nckvu~;LTbvgN| zx4A6xqMC&EgU;z4Aj2<8$bv?jQFvrnnVE44->0xV|1oXpFAqWj1ZfQ;yTu};opz^Huh~HAPdRdD{Y9h=&T*iFVt^|E*(r|m2~j9) z9=f#v0aOhhyW`ST<_~<^crUCS_h*^)y&-P${v1`0RcDD+VNac;v-^}iEET*EHx+k? zVFVZv2mlEM0$Ek(_5vFW0162ZgWIk)nl=0{s1N}F&bS$XF!kydl!YK;o!&WWlhU*# zxcldfT>Tze&$2^i0RAxwaw@~4dPZBQydXA=Cxfav9?m99UxgaFr?2R Date: Fri, 7 Jul 2017 17:09:16 +0300 Subject: [PATCH 19/21] meta-flatpak: use in-repo GPG home/keyring for signing commits. Use the in-repo pre-populated GPG home directory/keyring for signing commits in the flatpak ostree repositories we generate. Also switch to using the same development signing key as we use for signing our base OS ostree repository. Signed-off-by: Krisztian Litkey --- meta-flatpak/classes/flatpak-config.bbclass | 8 ++++--- .../classes/flatpak-repository.bbclass | 9 ++------ .../classes/refkit-signing-keys.bbclass | 22 +++++++++++++++++-- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/meta-flatpak/classes/flatpak-config.bbclass b/meta-flatpak/classes/flatpak-config.bbclass index 188cd82e5a..3bd2f7bf01 100644 --- a/meta-flatpak/classes/flatpak-config.bbclass +++ b/meta-flatpak/classes/flatpak-config.bbclass @@ -60,9 +60,11 @@ FLATPAK_BRANCH ?= "${DISTRO_VERSION}" FLATPAK_LATEST ?= "${DISTRO}/${FLATPAK_PN}/latest" FLATPAK_BUILD ?= "${DISTRO}/${FLATPAK_PN}/build/${BUILD_ID}" -# This is the GPG key id of our repository signing key. If you set this to -# empty, signing is disabled altogether. -FLATPAK_GPGID ?= "refkit-signing@key" +# This is the GPG homedir and the key ID for signing repository commits. If +# you set the key ID to empty, signing is disabled altogether. +FLATPAK_GPGDIR ?= "${FLATPAKBASE}/../meta-refkit-core/files/gnupg" +FLATPAK_GPGID ?= "${@d.getVar('DISTRO').replace(' ', '_') + \ + '-development-signing@key'}" # We can pre-populate the image during build with a set of pre-declared # flatpak repositories and associated dedicated flatpak-session users. diff --git a/meta-flatpak/classes/flatpak-repository.bbclass b/meta-flatpak/classes/flatpak-repository.bbclass index 9ca4c7a101..8e5a6717b6 100644 --- a/meta-flatpak/classes/flatpak-repository.bbclass +++ b/meta-flatpak/classes/flatpak-repository.bbclass @@ -4,9 +4,6 @@ REQUIRED_DISTRO_FEATURES_append = " usrmerge systemd pam" inherit flatpak-config -REFKIT_SIGNING_KEYS += "${FLATPAK_GPGID}" -inherit refkit-signing-keys - # These are lists of files we check to determine the flatpak # runtime type of an image if it is not directly visible from # the image name. 
This did not used to be necessary before we @@ -64,8 +61,7 @@ fakeroot do_flatpak_populate_repository () { echo "${IMAGE_BASENAME} is a flatpak $RUNTIME_TYPE image" if [ -n "${FLATPAK_GPGID}" ]; then - GPG_SIGN="--gpg-home ${REFKIT_SIGNING_GPGDIR} \ - --gpg-id ${FLATPAK_GPGID}" + GPG_SIGN="--gpg-home ${FLATPAK_GPGDIR} --gpg-id ${FLATPAK_GPGID}" else GPG_SIGN="" fi @@ -153,8 +149,7 @@ fakeroot do_flatpak_export_repository () { echo "${IMAGE_BASENAME} is a flatpak $RUNTIME_TYPE image" if [ -n "${FLATPAK_GPGID}" ]; then - GPG_SIGN="--gpg-home ${REFKIT_SIGNING_GPGDIR} \ - --gpg-id ${FLATPAK_GPGID}" + GPG_SIGN="--gpg-home ${FLATPAK_GPGDIR} --gpg-id ${FLATPAK_GPGID}" else GPG_SIGN="" fi diff --git a/meta-flatpak/classes/refkit-signing-keys.bbclass b/meta-flatpak/classes/refkit-signing-keys.bbclass index 3e4fe7f48d..ffda2882e6 100644 --- a/meta-flatpak/classes/refkit-signing-keys.bbclass +++ b/meta-flatpak/classes/refkit-signing-keys.bbclass @@ -7,6 +7,12 @@ # local.conf (or some other global configuration file) by setting # REFKIT_SIGNING_KEYS to necessary key IDs. +# This is the directory we look for pre-generated keys in. If we find a +# pre-generated key pair for any key id (we assume the key files be named +# as .pub and .sec) we import those instead of generating +# them anew. +REFKIT_SIGNING_KEYDIR ?= "${FLATPAK_LAYERDIR}/../meta-refkit-core/files/gpg-keys" + # Signing keys to generate, a list of key IDs. REFKIT_SIGNING_KEYS ?= "" @@ -43,8 +49,8 @@ fakeroot do_generate_signing_keys () { while [ $slept -lt ${REFKIT_SIGNING_TIMEOUT} ]; do if [ ! -e ${dir}/$id.sec ]; then echo "Waiting for generation of signing key $id..." - sleep 1 - slept=$( expr $slept + 1 ) + sleep 3 + slept=$( expr $slept + 3 ) else echo "Got signing key $id..." break @@ -63,6 +69,18 @@ fakeroot do_generate_signing_keys () { for id in ${REFKIT_SIGNING_KEYS}; do pubkey="$dir/$id.pub" seckey="$dir/$id.sec" + pubpre="${REFKIT_SIGNING_KEYDIR}/$id.pub" + secpre="${REFKIT_SIGNING_KEYDIR}/$id.sec" + + if [ -e $pubpre -a -e $secpre ]; then + echo "Re-using pre-generated key-pair $pubpre/$secpre..." + # gpg-keygen.sh below will import these keys. It actually + # races with any conflicting task waiting for the keys above, + # but that should be okay...ish... fast importing winning. + mkdir -p $dir + cp $pubpre $pubkey + cp $secpre $seckey + fi # Generate repository signing GPG keys, if we don't have them yet. echo "Generating/checking signing key $id..." From 5c337cc3f7675e4a1773d45d5d9fc603ecf58f09 Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Sat, 8 Jul 2017 18:29:56 +0300 Subject: [PATCH 20/21] ostree-image.bbclass: use in-repo GPG home/keyring for signing. Use the in-repo prepopulated GPG homedir/keyring for signing ostree repository commits. Updated doc/howtos/OSTree.rst to reflect these changes. 
Signed-off-by: Krisztian Litkey --- doc/howtos/OSTree.rst | 33 +++++++++++-------- doc/system-update.rst | 10 +++--- meta-refkit-core/classes/ostree-image.bbclass | 16 +++------ 3 files changed, 28 insertions(+), 31 deletions(-) diff --git a/doc/howtos/OSTree.rst b/doc/howtos/OSTree.rst index 8f6ff01ce2..a2b37b99c9 100644 --- a/doc/howtos/OSTree.rst +++ b/doc/howtos/OSTree.rst @@ -35,12 +35,23 @@ or other suitable global configuration file:: REFKIT_IMAGE_EXTRA_FEATURES += "ostree" -To use your GPG signing key pair for signing and verifying the OSTree -repository commits, assuming your keys are in the keyrings in -with key ID release@example.org, add the following to your local.conf or -other suitable global Yocto configuration file:: +For development images (*REFKIT_IMAGE_MODE* set to *'development'*), there +is no need to generate any keys. The default configuration uses pregenerated +keys already present in the git repository for such images. For production +images you can generate a pair of signing keys with the following command:: - OSTREE_GPGDIR = "" + meta-refkit-core/files/gnupg/generate-keys.sh $(pwd)/gpg release@example.org + +This will generate a pair of private and public GPG keys with the key ID +release@example.org, put them into the GPG keyring at $(pwd)/gpg and also +export them to the files $(pwd)/release@example.org.{pub,sec}. + +To use these newly generated keys for signing and verifying the OSTree +repository commits, add the following to your local.conf or other suitable +global configuration file (replace $(pwd) with the path to your +top-level intel-iot-refkit directory):: + + OSTREE_GPGDIR = "$(pwd)/gpg" OSTREE_GPGID = "release@example.org" Assuming you want to use your build machine, build.example.org, in this @@ -86,16 +97,10 @@ following commands:: ln -sf ostree-repo ostree ostree trivial-httpd --port 80 -A third alternative is use a simple Python HTTP server, for instance the -one from the project at:: - - http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/meta/lib/oeqa/utils/httpserver.py - -which is also available in the refkit source tree as:: - - openembedded-core/meta/lib/oeqa/utils/httpserver.py +A third alternative is to use a simple Python HTTP server, for instance +the `one _` from OpenEmbedded Core. -Now with teh above configuration in place, and an HTTP server running, +Now with the above configuration in place, and an HTTP server running, subsequent builds should get automatically exported and pulled in as updates by the clients running one of your refkit images. diff --git a/doc/system-update.rst b/doc/system-update.rst index e446b54820..f6bcdda056 100644 --- a/doc/system-update.rst +++ b/doc/system-update.rst @@ -121,16 +121,14 @@ publishing the permanent OSTree repository require some planning and customization. Also check the :file:`howtos/OSTree.rst` for more instructions and tips on how to use OSTree for system updates. -In development images, the default is to use a generated GPG key from -:file:`tmp-glibc/deploy/gnupg/` and a "permanent" OSTree repository in -:file:`tmp-glibc/deploy/ostree-repo/`. In other words, removing -:file:`tmp-glibc` really starts from scratch. +In development images, the default is to use a pregenerated GPG key from +the pregenerated GPG keyring in :file:`meta-refkit-core/files/gnupg` and a +"permanent" OSTree repository in :file:`tmp-glibc/deploy/ostree-repo/`. +In other words, removing :file:`tmp-glibc` really starts from scratch. 
Extra work is necessary when images from previous builds are still meant to be updateable: -#. The GPG key must be stored elsewhere (see ``OSTREE_GPGDIR`` and - ``OSTREE_GPGID``). #. The public OSTree repo must be stored elsewhere (see ``OSTREE_REPO``) *or* #. after a successful build, the new commit in :file:`tmp-glibc/deploy/ostree-repo/` must be moved to a different, more permanent OSTree repo with the diff --git a/meta-refkit-core/classes/ostree-image.bbclass b/meta-refkit-core/classes/ostree-image.bbclass index 2e19620e01..ddda2cbc40 100644 --- a/meta-refkit-core/classes/ostree-image.bbclass +++ b/meta-refkit-core/classes/ostree-image.bbclass @@ -66,23 +66,17 @@ OSTREE_COMMIT_SUBJECT ?= 'Build ${BUILD_ID} of ${PN} in ${DISTRO}' # This can be set to an empty string to disable publishing. OSTREE_REPO ?= "${DEPLOY_DIR}/ostree-repo" -# OSTREE_GPGDIR is where our GPG keyring is generated/located at and +# OSTREE_GPGDIR is where our GPG keyring is located at and # OSTREE_GPGID is the default key ID we use to sign (commits in) the # repository. These two need to be customized for real builds. # -# In development images the default is to use a key that gets -# generated automatically on demand by refkit-signing-keys.bbclass. -# Production images do not have a default. +# In development images the default is to use a pregenerated key from +# an in-repo keyring. Production images do not have a default. # -# Beware that generating keys can hang when the kernel runs out -# of entropy for /dev/random, and the current refkit-signing-keys.bbclass -# does not handle setting REFKIT_SIGNING_KEY in image recipes like -# we do here. TODO: replace with pre-generated keys? -OSTREE_GPGDIR ?= "${REFKIT_SIGNING_GPGDIR}" +OSTREE_GPGDIR ?= "${@ '' if (d.getVar('IMAGE_MODE') or 'production') == 'production' else '${META_REFKIT_CORE_BASE}/files/gnupg' }" OSTREE_GPGID_DEFAULT = "${@d.getVar('DISTRO').replace(' ', '_') + '-development-signing@key'}" -REFKIT_SIGNING_KEYS_append = " ${OSTREE_GPGID_DEFAULT}" OSTREE_GPGID ?= "${@ '' if (d.getVar('IMAGE_MODE') or 'production') == 'production' else '${OSTREE_GPGID_DEFAULT}' }" -inherit refkit-signing-keys + python () { if bb.utils.contains('IMAGE_FEATURES', 'ostree', True, False, d) and \ not d.getVar('OSTREE_GPGID'): From 61b863da9c4c5639067b9d71b7762701b6555d19 Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Tue, 11 Jul 2017 21:57:35 +0300 Subject: [PATCH 21/21] ostree selftest: stop refkit-update systemd service. Stop refkit-update systemd service before starting the HTTP server and thus making the update available, to prevent the updater systemd service racing with us, potentially winning and doing a full update cycle including the final reboot. 
Signed-off-by: Krisztian Litkey
---
 .../lib/oeqa/selftest/cases/refkit_ostree.py  | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py b/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py
index 561391b45e..710c0402b8 100644
--- a/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py
+++ b/meta-refkit-core/lib/oeqa/selftest/cases/refkit_ostree.py
@@ -60,6 +60,13 @@ def boot_image(self, overrides):
                               runqemuparams='ovmf slirp nographic',
                               image_fstype='wic')
 
+    def stop_update_service(self, qemu):
+        cmd = '''systemctl stop refkit-update.service'''
+        status, output = qemu.run_serial(cmd, timeout=600)
+        self.assertEqual(1, status, 'Failed to run command "%s":\n%s' % (cmd, output))
+        self.logger.info('Successfully stopped refkit-update systemd service:\n%s' % output)
+        return True
+
     def update_image(self, qemu):
         # We need to bring up some simple HTTP server for the
         # OSTree repo. We cannot get the actual OSTREE_REPO for the
@@ -70,6 +77,12 @@ def update_image(self, qemu):
         old_cwd = os.getcwd()
         server = None
         try:
+            # We need to stop the refkit-update systemd service before starting
+            # the HTTP server (and thus making any update available) to prevent
+            # the service from racing with us and potentially winning, doing a
+            # full update cycle including a final reboot.
+            self.stop_update_service(qemu)
+
             os.chdir(ostree_repo)
             class OSTreeHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
                 def log_message(s, format, *args):