Add build stuff
This commit is contained in:
parent
7370a77c88
commit
f0e1d81647
21 changed files with 4880 additions and 2 deletions
28
.github/workflows/kernel.yml
vendored
Normal file
28
.github/workflows/kernel.yml
vendored
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
name: KBuild
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
kb:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
- name: Clean
|
||||||
|
uses: rokibhasansagar/slimhub_actions@main
|
||||||
|
- name: Install tools
|
||||||
|
run: |
|
||||||
|
sudo apt-get update -y &>/dev/null || sudo apt-get update -y &>/dev/null || true
|
||||||
|
sudo apt-get upgrade -y &>/dev/null || sudo apt-get upgrade -y &>/dev/null || true
|
||||||
|
sudo apt-get install brotli zip zstd tar lz4 cpio xz-utils -y || sudo apt-get install brotli zip zstd tar lz4 cpio xz-utils -y
|
||||||
|
- name: Build
|
||||||
|
run: |
|
||||||
|
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/main/kernel/setup.sh" | bash -
|
||||||
|
bash build.sh
|
||||||
|
- name: Upload
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: Kernels_a53x
|
||||||
|
path: kernel_build/ExynosUnbound*
|
|
@ -813,9 +813,9 @@ CONFIG_HAVE_GCC_PLUGINS=y
|
||||||
CONFIG_RT_MUTEXES=y
|
CONFIG_RT_MUTEXES=y
|
||||||
CONFIG_BASE_SMALL=0
|
CONFIG_BASE_SMALL=0
|
||||||
CONFIG_MODULES=y
|
CONFIG_MODULES=y
|
||||||
CONFIG_MODULE_FORCE_LOAD=y
|
# CONFIG_MODULE_FORCE_LOAD is not set
|
||||||
CONFIG_MODULE_UNLOAD=y
|
CONFIG_MODULE_UNLOAD=y
|
||||||
CONFIG_MODULE_FORCE_UNLOAD=y
|
# CONFIG_MODULE_FORCE_UNLOAD is not set
|
||||||
CONFIG_MODVERSIONS=y
|
CONFIG_MODVERSIONS=y
|
||||||
CONFIG_ASM_MODVERSIONS=y
|
CONFIG_ASM_MODVERSIONS=y
|
||||||
# CONFIG_MODULE_SRCVERSION_ALL is not set
|
# CONFIG_MODULE_SRCVERSION_ALL is not set
|
||||||
|
|
BIN
kernel_build/bin/python2
Executable file
BIN
kernel_build/bin/python2
Executable file
Binary file not shown.
BIN
kernel_build/boot/ramdisk
Executable file
BIN
kernel_build/boot/ramdisk
Executable file
Binary file not shown.
186
kernel_build/build.sh
Executable file
186
kernel_build/build.sh
Executable file
|
@ -0,0 +1,186 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
XY_VERSION="R3.0"
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
if [ -z "$1" ]; then
|
||||||
|
echo "Please exec from root directory"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
cd "$1"
|
||||||
|
|
||||||
|
if [ "$(uname -m)" != "x86_64" ]; then
|
||||||
|
echo "This script requires an x86_64 (64-bit) machine."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
export PATH="$(pwd)/kernel_build/bin:$PATH"
|
||||||
|
|
||||||
|
# Configs
|
||||||
|
OUTDIR="$(pwd)/out"
|
||||||
|
MODULES_OUTDIR="$(pwd)/modules_out"
|
||||||
|
TMPDIR="$(pwd)/kernel_build/tmp"
|
||||||
|
|
||||||
|
IN_PLATFORM="$(pwd)/kernel_build/vboot_platform"
|
||||||
|
IN_DLKM="$(pwd)/kernel_build/vboot_dlkm"
|
||||||
|
IN_DTB="$OUTDIR/arch/arm64/boot/dts/exynos/s5e8825.dtb"
|
||||||
|
|
||||||
|
PLATFORM_RAMDISK_DIR="$TMPDIR/ramdisk_platform"
|
||||||
|
DLKM_RAMDISK_DIR="$TMPDIR/ramdisk_dlkm"
|
||||||
|
PREBUILT_RAMDISK="$(pwd)/kernel_build/boot/ramdisk"
|
||||||
|
MODULES_DIR="$DLKM_RAMDISK_DIR/lib/modules"
|
||||||
|
|
||||||
|
MKBOOTIMG="$(pwd)/kernel_build/mkbootimg/mkbootimg.py"
|
||||||
|
MKDTBOIMG="$(pwd)/kernel_build/dtb/mkdtboimg.py"
|
||||||
|
|
||||||
|
OUT_KERNELZIP="$(pwd)/kernel_build/ExynosUnbound-${XY_VERSION}_a53x.zip"
|
||||||
|
OUT_KERNELTAR="$(pwd)/kernel_build/ExynosUnbound-${XY_VERSION}_a53x.tar"
|
||||||
|
OUT_KERNEL="$OUTDIR/arch/arm64/boot/Image"
|
||||||
|
OUT_BOOTIMG="$(pwd)/kernel_build/zip/boot.img"
|
||||||
|
OUT_VENDORBOOTIMG="$(pwd)/kernel_build/zip/vendor_boot.img"
|
||||||
|
OUT_DTBIMAGE="$TMPDIR/dtb.img"
|
||||||
|
|
||||||
|
# Kernel-side
|
||||||
|
BUILD_ARGS="LOCALVERSION=-XyUnbound-${XY_VERSION} KBUILD_BUILD_USER=Gabriel260BR KBUILD_BUILD_HOST=ExynosUnbound"
|
||||||
|
|
||||||
|
kfinish() {
|
||||||
|
rm -rf "$TMPDIR"
|
||||||
|
rm -rf "$OUTDIR"
|
||||||
|
rm -rf "$MODULES_OUTDIR"
|
||||||
|
}
|
||||||
|
|
||||||
|
kfinish
|
||||||
|
|
||||||
|
DIR="$(readlink -f .)"
|
||||||
|
PARENT_DIR="$(readlink -f ${DIR}/..)"
|
||||||
|
|
||||||
|
export CROSS_COMPILE="$PARENT_DIR/clang-r416183b/bin/aarch64-linux-gnu-"
|
||||||
|
export CC="$PARENT_DIR/clang-r416183b/bin/clang"
|
||||||
|
|
||||||
|
export PLATFORM_VERSION=12
|
||||||
|
export ANDROID_MAJOR_VERSION=s
|
||||||
|
export PATH="$PARENT_DIR/build-tools/path/linux-x86:$PARENT_DIR/clang-r416183b/bin:$PATH"
|
||||||
|
export TARGET_SOC=s5e8825
|
||||||
|
export LLVM=1 LLVM_IAS=1
|
||||||
|
export ARCH=arm64
|
||||||
|
|
||||||
|
if [ ! -d "$PARENT_DIR/clang-r416183b" ]; then
|
||||||
|
git clone https://github.com/crdroidandroid/android_prebuilts_clang_host_linux-x86_clang-r416183b "$PARENT_DIR/clang-r416183b" --depth=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -d "$PARENT_DIR/build-tools" ]; then
|
||||||
|
git clone https://android.googlesource.com/platform/prebuilts/build-tools "$PARENT_DIR/build-tools" --depth=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
make -j$(nproc --all) -C $(pwd) O=out $BUILD_ARGS a53x_defconfig >/dev/null
|
||||||
|
make -j$(nproc --all) -C $(pwd) O=out $BUILD_ARGS dtbs >/dev/null
|
||||||
|
make -j$(nproc --all) -C $(pwd) O=out $BUILD_ARGS >/dev/null
|
||||||
|
make -j$(nproc --all) -C $(pwd) O=out INSTALL_MOD_STRIP="--strip-debug --keep-section=.ARM.attributes" INSTALL_MOD_PATH="$MODULES_OUTDIR" modules_install >/dev/null
|
||||||
|
|
||||||
|
rm -rf "$TMPDIR"
|
||||||
|
rm -f "$OUT_BOOTIMG"
|
||||||
|
rm -f "$OUT_VENDORBOOTIMG"
|
||||||
|
mkdir "$TMPDIR"
|
||||||
|
mkdir -p "$MODULES_DIR/0.0"
|
||||||
|
mkdir "$PLATFORM_RAMDISK_DIR"
|
||||||
|
|
||||||
|
cp -rf "$IN_PLATFORM"/* "$PLATFORM_RAMDISK_DIR/"
|
||||||
|
mkdir "$PLATFORM_RAMDISK_DIR/first_stage_ramdisk"
|
||||||
|
cp -f "$PLATFORM_RAMDISK_DIR/fstab.s5e8825" "$PLATFORM_RAMDISK_DIR/first_stage_ramdisk/fstab.s5e8825"
|
||||||
|
|
||||||
|
if ! find "$MODULES_OUTDIR/lib/modules" -mindepth 1 -type d | read; then
|
||||||
|
echo "Unknown error!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
missing_modules=""
|
||||||
|
|
||||||
|
for module in $(cat "$IN_DLKM/modules.load"); do
|
||||||
|
i=$(find "$MODULES_OUTDIR/lib/modules" -name $module);
|
||||||
|
if [ -f "$i" ]; then
|
||||||
|
cp -f "$i" "$MODULES_DIR/0.0/$module"
|
||||||
|
else
|
||||||
|
missing_modules="$missing_modules $module"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "$missing_modules" != "" ]; then
|
||||||
|
echo "ERROR: the following modules were not found: $missing_modules"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
depmod 0.0 -b "$DLKM_RAMDISK_DIR"
|
||||||
|
sed -i 's/\([^ ]\+\)/\/lib\/modules\/\1/g' "$MODULES_DIR/0.0/modules.dep"
|
||||||
|
cd "$MODULES_DIR/0.0"
|
||||||
|
for i in $(find . -name "modules.*" -type f); do
|
||||||
|
if [ $(basename "$i") != "modules.dep" ] && [ $(basename "$i") != "modules.softdep" ] && [ $(basename "$i") != "modules.alias" ]; then
|
||||||
|
rm -f "$i"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
cd "$DIR"
|
||||||
|
|
||||||
|
cp -f "$IN_DLKM/modules.load" "$MODULES_DIR/0.0/modules.load"
|
||||||
|
mv "$MODULES_DIR/0.0"/* "$MODULES_DIR/"
|
||||||
|
rm -rf "$MODULES_DIR/0.0"
|
||||||
|
|
||||||
|
echo "Building dtb image..."
|
||||||
|
python2 "$MKDTBOIMG" create "$OUT_DTBIMAGE" --custom0=0x00000000 --custom1=0xff000000 --version=0 --page_size=2048 "$IN_DTB" || exit 1
|
||||||
|
|
||||||
|
echo "Building boot image..."
|
||||||
|
|
||||||
|
$MKBOOTIMG --header_version 4 \
|
||||||
|
--kernel "$OUT_KERNEL" \
|
||||||
|
--output "$OUT_BOOTIMG" \
|
||||||
|
--ramdisk "$PREBUILT_RAMDISK" \
|
||||||
|
--os_version 12.0.0 \
|
||||||
|
--os_patch_level 2024-01 || exit 1
|
||||||
|
|
||||||
|
echo "Done!"
|
||||||
|
echo "Building vendor_boot image..."
|
||||||
|
|
||||||
|
cd "$DLKM_RAMDISK_DIR"
|
||||||
|
find . | cpio --quiet -o -H newc -R root:root | lz4 -9cl > ../ramdisk_dlkm.lz4
|
||||||
|
cd ../ramdisk_platform
|
||||||
|
find . | cpio --quiet -o -H newc -R root:root | lz4 -9cl > ../ramdisk_platform.lz4
|
||||||
|
cd ..
|
||||||
|
echo "buildtime_bootconfig=enable" > bootconfig
|
||||||
|
|
||||||
|
$MKBOOTIMG --header_version 4 \
|
||||||
|
--vendor_boot "$OUT_VENDORBOOTIMG" \
|
||||||
|
--vendor_bootconfig "$(pwd)/bootconfig" \
|
||||||
|
--dtb "$OUT_DTBIMAGE" \
|
||||||
|
--vendor_ramdisk "$(pwd)/ramdisk_platform.lz4" \
|
||||||
|
--ramdisk_type dlkm \
|
||||||
|
--ramdisk_name dlkm \
|
||||||
|
--vendor_ramdisk_fragment "$(pwd)/ramdisk_dlkm.lz4" \
|
||||||
|
--os_version 12.0.0 \
|
||||||
|
--os_patch_level 2024-01 || exit 1
|
||||||
|
|
||||||
|
cd "$DIR"
|
||||||
|
|
||||||
|
echo "Done!"
|
||||||
|
|
||||||
|
echo "Building zip..."
|
||||||
|
cd "$(pwd)/kernel_build/zip"
|
||||||
|
rm -f "$OUT_KERNELZIP"
|
||||||
|
brotli --quality=11 -c boot.img > boot.br
|
||||||
|
brotli --quality=11 -c vendor_boot.img > vendor_boot.br
|
||||||
|
zip -r9 -q "$OUT_KERNELZIP" META-INF boot.br vendor_boot.br
|
||||||
|
rm -f boot.br vendor_boot.br
|
||||||
|
cd "$DIR"
|
||||||
|
echo "Done! Output: $OUT_KERNELZIP"
|
||||||
|
|
||||||
|
echo "Building tar..."
|
||||||
|
cd "$(pwd)/kernel_build"
|
||||||
|
rm -f "$OUT_KERNELTAR"
|
||||||
|
lz4 -c -12 -B6 --content-size "$OUT_BOOTIMG" > boot.img.lz4
|
||||||
|
lz4 -c -12 -B6 --content-size "$OUT_VENDORBOOTIMG" > vendor_boot.img.lz4
|
||||||
|
tar -cf "$OUT_KERNELTAR" boot.img.lz4 vendor_boot.img.lz4
|
||||||
|
cd "$DIR"
|
||||||
|
rm -f boot.img.lz4 vendor_boot.img.lz4
|
||||||
|
echo "Done! Output: $OUT_KERNELTAR"
|
||||||
|
|
||||||
|
echo "Cleaning..."
|
||||||
|
rm -f "${OUT_VENDORBOOTIMG}" "${OUT_BOOTIMG}"
|
||||||
|
kfinish
|
886
kernel_build/dtb/mkdtboimg.py
Executable file
886
kernel_build/dtb/mkdtboimg.py
Executable file
|
@ -0,0 +1,886 @@
|
||||||
|
#! /usr/bin/env python2
|
||||||
|
# Copyright 2017, The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
from __future__ import print_function
|
||||||
|
"""Tool for packing multiple DTB/DTBO files into a single image"""
|
||||||
|
import argparse
|
||||||
|
import fnmatch
|
||||||
|
import os
|
||||||
|
import struct
|
||||||
|
import zlib
|
||||||
|
from array import array
|
||||||
|
from collections import namedtuple
|
||||||
|
from sys import stdout
|
||||||
|
class CompressionFormat(object):
|
||||||
|
"""Enum representing DT compression format for a DT entry.
|
||||||
|
"""
|
||||||
|
NO_COMPRESSION = 0x00
|
||||||
|
ZLIB_COMPRESSION = 0x01
|
||||||
|
GZIP_COMPRESSION = 0x02
|
||||||
|
class DtEntry(object):
|
||||||
|
"""Provides individual DT image file arguments to be added to a DTBO.
|
||||||
|
Attributes:
|
||||||
|
REQUIRED_KEYS_V0: 'keys' needed to be present in the dictionary passed to instantiate
|
||||||
|
an object of this class when a DTBO header of version 0 is used.
|
||||||
|
REQUIRED_KEYS_V1: 'keys' needed to be present in the dictionary passed to instantiate
|
||||||
|
an object of this class when a DTBO header of version 1 is used.
|
||||||
|
COMPRESSION_FORMAT_MASK: Mask to retrieve compression info for DT entry from flags field
|
||||||
|
when a DTBO header of version 1 is used.
|
||||||
|
"""
|
||||||
|
COMPRESSION_FORMAT_MASK = 0x0f
|
||||||
|
REQUIRED_KEYS_V0 = ('dt_file', 'dt_size', 'dt_offset', 'id', 'rev',
|
||||||
|
'custom0', 'custom1', 'custom2', 'custom3')
|
||||||
|
REQUIRED_KEYS_V1 = ('dt_file', 'dt_size', 'dt_offset', 'id', 'rev',
|
||||||
|
'flags', 'custom0', 'custom1', 'custom2')
|
||||||
|
@staticmethod
|
||||||
|
def __get_number_or_prop(arg):
|
||||||
|
"""Converts string to integer or reads the property from DT image.
|
||||||
|
Args:
|
||||||
|
arg: String containing the argument provided on the command line.
|
||||||
|
Returns:
|
||||||
|
An integer property read from DT file or argument string
|
||||||
|
converted to integer
|
||||||
|
"""
|
||||||
|
if not arg or arg[0] == '+' or arg[0] == '-':
|
||||||
|
raise ValueError('Invalid argument passed to DTImage')
|
||||||
|
if arg[0] == '/':
|
||||||
|
# TODO(b/XXX): Use pylibfdt to get property value from DT
|
||||||
|
raise ValueError('Invalid argument passed to DTImage')
|
||||||
|
else:
|
||||||
|
base = 10
|
||||||
|
if arg.startswith('0x') or arg.startswith('0X'):
|
||||||
|
base = 16
|
||||||
|
elif arg.startswith('0'):
|
||||||
|
base = 8
|
||||||
|
return int(arg, base)
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
"""Constructor for DtEntry object.
|
||||||
|
Initializes attributes from dictionary object that contains
|
||||||
|
values keyed with names equivalent to the class's attributes.
|
||||||
|
Args:
|
||||||
|
kwargs: Dictionary object containing values to instantiate
|
||||||
|
class members with. Expected keys in dictionary are from
|
||||||
|
the tuple (_REQUIRED_KEYS)
|
||||||
|
"""
|
||||||
|
self.__version = kwargs['version']
|
||||||
|
required_keys = None
|
||||||
|
if self.__version == 0:
|
||||||
|
required_keys = self.REQUIRED_KEYS_V0
|
||||||
|
elif self.__version == 1:
|
||||||
|
required_keys = self.REQUIRED_KEYS_V1
|
||||||
|
missing_keys = set(required_keys) - set(kwargs)
|
||||||
|
if missing_keys:
|
||||||
|
raise ValueError('Missing keys in DtEntry constructor: %r' %
|
||||||
|
sorted(missing_keys))
|
||||||
|
self.__dt_file = kwargs['dt_file']
|
||||||
|
self.__dt_offset = kwargs['dt_offset']
|
||||||
|
self.__dt_size = kwargs['dt_size']
|
||||||
|
self.__id = self.__get_number_or_prop(kwargs['id'])
|
||||||
|
self.__rev = self.__get_number_or_prop(kwargs['rev'])
|
||||||
|
if self.__version == 1:
|
||||||
|
self.__flags = self.__get_number_or_prop(kwargs['flags'])
|
||||||
|
self.__custom0 = self.__get_number_or_prop(kwargs['custom0'])
|
||||||
|
self.__custom1 = self.__get_number_or_prop(kwargs['custom1'])
|
||||||
|
self.__custom2 = self.__get_number_or_prop(kwargs['custom2'])
|
||||||
|
if self.__version == 0:
|
||||||
|
self.__custom3 = self.__get_number_or_prop(kwargs['custom3'])
|
||||||
|
def __str__(self):
|
||||||
|
sb = []
|
||||||
|
sb.append('{key:>20} = {value:d}'.format(key='dt_size',
|
||||||
|
value=self.__dt_size))
|
||||||
|
sb.append('{key:>20} = {value:d}'.format(key='dt_offset',
|
||||||
|
value=self.__dt_offset))
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key='id',
|
||||||
|
value=self.__id))
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key='rev',
|
||||||
|
value=self.__rev))
|
||||||
|
if self.__version == 1:
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key='flags',
|
||||||
|
value=self.__flags))
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key='custom[0]',
|
||||||
|
value=self.__custom0))
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key='custom[1]',
|
||||||
|
value=self.__custom1))
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key='custom[2]',
|
||||||
|
value=self.__custom2))
|
||||||
|
if self.__version == 0:
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key='custom[3]',
|
||||||
|
value=self.__custom3))
|
||||||
|
return '\n'.join(sb)
|
||||||
|
def compression_info(self):
|
||||||
|
"""CompressionFormat: compression format for DT image file.
|
||||||
|
Args:
|
||||||
|
version: Version of DTBO header, compression is only
|
||||||
|
supported from version 1.
|
||||||
|
"""
|
||||||
|
if self.__version == 0:
|
||||||
|
return CompressionFormat.NO_COMPRESSION
|
||||||
|
return self.flags & self.COMPRESSION_FORMAT_MASK
|
||||||
|
@property
|
||||||
|
def dt_file(self):
|
||||||
|
"""file: File handle to the DT image file."""
|
||||||
|
return self.__dt_file
|
||||||
|
@property
|
||||||
|
def size(self):
|
||||||
|
"""int: size in bytes of the DT image file."""
|
||||||
|
return self.__dt_size
|
||||||
|
@size.setter
|
||||||
|
def size(self, value):
|
||||||
|
self.__dt_size = value
|
||||||
|
@property
|
||||||
|
def dt_offset(self):
|
||||||
|
"""int: offset in DTBO file for this DT image."""
|
||||||
|
return self.__dt_offset
|
||||||
|
@dt_offset.setter
|
||||||
|
def dt_offset(self, value):
|
||||||
|
self.__dt_offset = value
|
||||||
|
@property
|
||||||
|
def image_id(self):
|
||||||
|
"""int: DT entry _id for this DT image."""
|
||||||
|
return self.__id
|
||||||
|
@property
|
||||||
|
def rev(self):
|
||||||
|
"""int: DT entry _rev for this DT image."""
|
||||||
|
return self.__rev
|
||||||
|
@property
|
||||||
|
def flags(self):
|
||||||
|
"""int: DT entry _flags for this DT image."""
|
||||||
|
return self.__flags
|
||||||
|
@property
|
||||||
|
def custom0(self):
|
||||||
|
"""int: DT entry _custom0 for this DT image."""
|
||||||
|
return self.__custom0
|
||||||
|
@property
|
||||||
|
def custom1(self):
|
||||||
|
"""int: DT entry _custom1 for this DT image."""
|
||||||
|
return self.__custom1
|
||||||
|
@property
|
||||||
|
def custom2(self):
|
||||||
|
"""int: DT entry custom2 for this DT image."""
|
||||||
|
return self.__custom2
|
||||||
|
@property
|
||||||
|
def custom3(self):
|
||||||
|
"""int: DT entry custom3 for this DT image."""
|
||||||
|
return self.__custom3
|
||||||
|
class Dtbo(object):
|
||||||
|
"""
|
||||||
|
Provides parser, reader, writer for dumping and creating Device Tree Blob
|
||||||
|
Overlay (DTBO) images.
|
||||||
|
Attributes:
|
||||||
|
_DTBO_MAGIC: Device tree table header magic.
|
||||||
|
_ACPIO_MAGIC: Advanced Configuration and Power Interface table header
|
||||||
|
magic.
|
||||||
|
_DT_TABLE_HEADER_SIZE: Size of Device tree table header.
|
||||||
|
_DT_TABLE_HEADER_INTS: Number of integers in DT table header.
|
||||||
|
_DT_ENTRY_HEADER_SIZE: Size of Device tree entry header within a DTBO.
|
||||||
|
_DT_ENTRY_HEADER_INTS: Number of integers in DT entry header.
|
||||||
|
_GZIP_COMPRESSION_WBITS: Argument 'wbits' for gzip compression
|
||||||
|
_ZLIB_DECOMPRESSION_WBITS: Argument 'wbits' for zlib/gzip compression
|
||||||
|
"""
|
||||||
|
_DTBO_MAGIC = 0xd7b7ab1e
|
||||||
|
_ACPIO_MAGIC = 0x41435049
|
||||||
|
_DT_TABLE_HEADER_SIZE = struct.calcsize('>8I')
|
||||||
|
_DT_TABLE_HEADER_INTS = 8
|
||||||
|
_DT_ENTRY_HEADER_SIZE = struct.calcsize('>8I')
|
||||||
|
_DT_ENTRY_HEADER_INTS = 8
|
||||||
|
_GZIP_COMPRESSION_WBITS = 31
|
||||||
|
_ZLIB_DECOMPRESSION_WBITS = 47
|
||||||
|
def _update_dt_table_header(self):
|
||||||
|
"""Converts header entries into binary data for DTBO header.
|
||||||
|
Packs the current Device tree table header attribute values in
|
||||||
|
metadata buffer.
|
||||||
|
"""
|
||||||
|
struct.pack_into('>8I', self.__metadata, 0, self.magic,
|
||||||
|
self.total_size, self.header_size,
|
||||||
|
self.dt_entry_size, self.dt_entry_count,
|
||||||
|
self.dt_entries_offset, self.page_size,
|
||||||
|
self.version)
|
||||||
|
def _update_dt_entry_header(self, dt_entry, metadata_offset):
|
||||||
|
"""Converts each DT entry header entry into binary data for DTBO file.
|
||||||
|
Packs the current device tree table entry attribute into
|
||||||
|
metadata buffer as device tree entry header.
|
||||||
|
Args:
|
||||||
|
dt_entry: DtEntry object for the header to be packed.
|
||||||
|
metadata_offset: Offset into metadata buffer to begin writing.
|
||||||
|
dtbo_offset: Offset where the DT image file for this dt_entry can
|
||||||
|
be found in the resulting DTBO image.
|
||||||
|
"""
|
||||||
|
if self.version == 0:
|
||||||
|
struct.pack_into('>8I', self.__metadata, metadata_offset, dt_entry.size,
|
||||||
|
dt_entry.dt_offset, dt_entry.image_id, dt_entry.rev,
|
||||||
|
dt_entry.custom0, dt_entry.custom1, dt_entry.custom2,
|
||||||
|
dt_entry.custom3)
|
||||||
|
elif self.version == 1:
|
||||||
|
struct.pack_into('>8I', self.__metadata, metadata_offset, dt_entry.size,
|
||||||
|
dt_entry.dt_offset, dt_entry.image_id, dt_entry.rev,
|
||||||
|
dt_entry.flags, dt_entry.custom0, dt_entry.custom1,
|
||||||
|
dt_entry.custom2)
|
||||||
|
def _update_metadata(self):
|
||||||
|
"""Updates the DTBO metadata.
|
||||||
|
Initialize the internal metadata buffer and fill it with all Device
|
||||||
|
Tree table entries and update the DTBO header.
|
||||||
|
"""
|
||||||
|
self.__metadata = array('b', b' ' * self.__metadata_size)
|
||||||
|
metadata_offset = self.header_size
|
||||||
|
for dt_entry in self.__dt_entries:
|
||||||
|
self._update_dt_entry_header(dt_entry, metadata_offset)
|
||||||
|
metadata_offset += self.dt_entry_size
|
||||||
|
self._update_dt_table_header()
|
||||||
|
def _read_dtbo_header(self, buf):
|
||||||
|
"""Reads DTBO file header into metadata buffer.
|
||||||
|
Unpack and read the DTBO table header from given buffer. The
|
||||||
|
buffer size must exactly be equal to _DT_TABLE_HEADER_SIZE.
|
||||||
|
Args:
|
||||||
|
buf: Bytebuffer read directly from the file of size
|
||||||
|
_DT_TABLE_HEADER_SIZE.
|
||||||
|
"""
|
||||||
|
(self.magic, self.total_size, self.header_size,
|
||||||
|
self.dt_entry_size, self.dt_entry_count, self.dt_entries_offset,
|
||||||
|
self.page_size, self.version) = struct.unpack_from('>8I', buf, 0)
|
||||||
|
# verify the header
|
||||||
|
if self.magic != self._DTBO_MAGIC and self.magic != self._ACPIO_MAGIC:
|
||||||
|
raise ValueError('Invalid magic number 0x%x in DTBO/ACPIO file' %
|
||||||
|
(self.magic))
|
||||||
|
if self.header_size != self._DT_TABLE_HEADER_SIZE:
|
||||||
|
raise ValueError('Invalid header size (%d) in DTBO/ACPIO file' %
|
||||||
|
(self.header_size))
|
||||||
|
if self.dt_entry_size != self._DT_ENTRY_HEADER_SIZE:
|
||||||
|
raise ValueError('Invalid DT entry header size (%d) in DTBO/ACPIO file' %
|
||||||
|
(self.dt_entry_size))
|
||||||
|
def _read_dt_entries_from_metadata(self):
|
||||||
|
"""Reads individual DT entry headers from metadata buffer.
|
||||||
|
Unpack and read the DTBO DT entry headers from the internal buffer.
|
||||||
|
The buffer size must exactly be equal to _DT_TABLE_HEADER_SIZE +
|
||||||
|
(_DT_ENTRY_HEADER_SIZE * dt_entry_count). The method raises exception
|
||||||
|
if DT entries have already been set for this object.
|
||||||
|
"""
|
||||||
|
if self.__dt_entries:
|
||||||
|
raise ValueError('DTBO DT entries can be added only once')
|
||||||
|
offset = self.dt_entries_offset // 4
|
||||||
|
params = {}
|
||||||
|
params['version'] = self.version
|
||||||
|
params['dt_file'] = None
|
||||||
|
for i in range(0, self.dt_entry_count):
|
||||||
|
dt_table_entry = self.__metadata[offset:offset + self._DT_ENTRY_HEADER_INTS]
|
||||||
|
params['dt_size'] = dt_table_entry[0]
|
||||||
|
params['dt_offset'] = dt_table_entry[1]
|
||||||
|
for j in range(2, self._DT_ENTRY_HEADER_INTS):
|
||||||
|
required_keys = None
|
||||||
|
if self.version == 0:
|
||||||
|
required_keys = DtEntry.REQUIRED_KEYS_V0
|
||||||
|
elif self.version == 1:
|
||||||
|
required_keys = DtEntry.REQUIRED_KEYS_V1
|
||||||
|
params[required_keys[j + 1]] = str(dt_table_entry[j])
|
||||||
|
dt_entry = DtEntry(**params)
|
||||||
|
self.__dt_entries.append(dt_entry)
|
||||||
|
offset += self._DT_ENTRY_HEADER_INTS
|
||||||
|
def _read_dtbo_image(self):
|
||||||
|
"""Parse the input file and instantiate this object."""
|
||||||
|
# First check if we have enough to read the header
|
||||||
|
file_size = os.fstat(self.__file.fileno()).st_size
|
||||||
|
if file_size < self._DT_TABLE_HEADER_SIZE:
|
||||||
|
raise ValueError('Invalid DTBO file')
|
||||||
|
self.__file.seek(0)
|
||||||
|
buf = self.__file.read(self._DT_TABLE_HEADER_SIZE)
|
||||||
|
self._read_dtbo_header(buf)
|
||||||
|
self.__metadata_size = (self.header_size +
|
||||||
|
self.dt_entry_count * self.dt_entry_size)
|
||||||
|
if file_size < self.__metadata_size:
|
||||||
|
raise ValueError('Invalid or truncated DTBO file of size %d expected %d' %
|
||||||
|
file_size, self.__metadata_size)
|
||||||
|
num_ints = (self._DT_TABLE_HEADER_INTS +
|
||||||
|
self.dt_entry_count * self._DT_ENTRY_HEADER_INTS)
|
||||||
|
if self.dt_entries_offset > self._DT_TABLE_HEADER_SIZE:
|
||||||
|
num_ints += (self.dt_entries_offset - self._DT_TABLE_HEADER_SIZE) / 4
|
||||||
|
format_str = '>' + str(num_ints) + 'I'
|
||||||
|
self.__file.seek(0)
|
||||||
|
self.__metadata = struct.unpack(format_str,
|
||||||
|
self.__file.read(self.__metadata_size))
|
||||||
|
self._read_dt_entries_from_metadata()
|
||||||
|
def _find_dt_entry_with_same_file(self, dt_entry):
|
||||||
|
"""Finds DT Entry that has identical backing DT file.
|
||||||
|
Args:
|
||||||
|
dt_entry: DtEntry object whose 'dtfile' we find for existence in the
|
||||||
|
current 'dt_entries'.
|
||||||
|
Returns:
|
||||||
|
If a match by file path is found, the corresponding DtEntry object
|
||||||
|
from internal list is returned. If not, 'None' is returned.
|
||||||
|
"""
|
||||||
|
dt_entry_path = os.path.realpath(dt_entry.dt_file.name)
|
||||||
|
for entry in self.__dt_entries:
|
||||||
|
entry_path = os.path.realpath(entry.dt_file.name)
|
||||||
|
if entry_path == dt_entry_path:
|
||||||
|
return entry
|
||||||
|
return None
|
||||||
|
def __init__(self, file_handle, dt_type='dtb', page_size=None, version=0):
|
||||||
|
"""Constructor for Dtbo Object
|
||||||
|
Args:
|
||||||
|
file_handle: The Dtbo File handle corresponding to this object.
|
||||||
|
The file handle can be used to write to (in case of 'create')
|
||||||
|
or read from (in case of 'dump')
|
||||||
|
"""
|
||||||
|
self.__file = file_handle
|
||||||
|
self.__dt_entries = []
|
||||||
|
self.__metadata = None
|
||||||
|
self.__metadata_size = 0
|
||||||
|
# if page_size is given, assume the object is being instantiated to
|
||||||
|
# create a DTBO file
|
||||||
|
if page_size:
|
||||||
|
if dt_type == 'acpi':
|
||||||
|
self.magic = self._ACPIO_MAGIC
|
||||||
|
else:
|
||||||
|
self.magic = self._DTBO_MAGIC
|
||||||
|
self.total_size = self._DT_TABLE_HEADER_SIZE
|
||||||
|
self.header_size = self._DT_TABLE_HEADER_SIZE
|
||||||
|
self.dt_entry_size = self._DT_ENTRY_HEADER_SIZE
|
||||||
|
self.dt_entry_count = 0
|
||||||
|
self.dt_entries_offset = self._DT_TABLE_HEADER_SIZE
|
||||||
|
self.page_size = page_size
|
||||||
|
self.version = version
|
||||||
|
self.__metadata_size = self._DT_TABLE_HEADER_SIZE
|
||||||
|
else:
|
||||||
|
self._read_dtbo_image()
|
||||||
|
def __str__(self):
|
||||||
|
sb = []
|
||||||
|
sb.append('dt_table_header:')
|
||||||
|
_keys = ('magic', 'total_size', 'header_size', 'dt_entry_size',
|
||||||
|
'dt_entry_count', 'dt_entries_offset', 'page_size', 'version')
|
||||||
|
for key in _keys:
|
||||||
|
if key == 'magic':
|
||||||
|
sb.append('{key:>20} = {value:08x}'.format(key=key,
|
||||||
|
value=self.__dict__[key]))
|
||||||
|
else:
|
||||||
|
sb.append('{key:>20} = {value:d}'.format(key=key,
|
||||||
|
value=self.__dict__[key]))
|
||||||
|
count = 0
|
||||||
|
for dt_entry in self.__dt_entries:
|
||||||
|
sb.append('dt_table_entry[{0:d}]:'.format(count))
|
||||||
|
sb.append(str(dt_entry))
|
||||||
|
count = count + 1
|
||||||
|
return '\n'.join(sb)
|
||||||
|
@property
|
||||||
|
def dt_entries(self):
|
||||||
|
"""Returns a list of DtEntry objects found in DTBO file."""
|
||||||
|
return self.__dt_entries
|
||||||
|
def compress_dt_entry(self, compression_format, dt_entry_file):
|
||||||
|
"""Compresses a DT entry.
|
||||||
|
Args:
|
||||||
|
compression_format: Compression format for DT Entry
|
||||||
|
dt_entry_file: File handle to read DT entry from.
|
||||||
|
Returns:
|
||||||
|
Compressed DT entry and its length.
|
||||||
|
Raises:
|
||||||
|
ValueError if unrecognized compression format is found.
|
||||||
|
"""
|
||||||
|
compress_zlib = zlib.compressobj() # zlib
|
||||||
|
compress_gzip = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
|
||||||
|
zlib.DEFLATED, self._GZIP_COMPRESSION_WBITS) # gzip
|
||||||
|
compression_obj_dict = {
|
||||||
|
CompressionFormat.NO_COMPRESSION: None,
|
||||||
|
CompressionFormat.ZLIB_COMPRESSION: compress_zlib,
|
||||||
|
CompressionFormat.GZIP_COMPRESSION: compress_gzip,
|
||||||
|
}
|
||||||
|
if compression_format not in compression_obj_dict:
|
||||||
|
ValueError("Bad compression format %d" % compression_format)
|
||||||
|
if compression_format is CompressionFormat.NO_COMPRESSION:
|
||||||
|
dt_entry = dt_entry_file.read()
|
||||||
|
else:
|
||||||
|
compression_object = compression_obj_dict[compression_format]
|
||||||
|
dt_entry_file.seek(0)
|
||||||
|
dt_entry = compression_object.compress(dt_entry_file.read())
|
||||||
|
dt_entry += compression_object.flush()
|
||||||
|
return dt_entry, len(dt_entry)
|
||||||
|
def add_dt_entries(self, dt_entries):
|
||||||
|
"""Adds DT image files to the DTBO object.
|
||||||
|
Adds a list of Dtentry Objects to the DTBO image. The changes are not
|
||||||
|
committed to the output file until commit() is called.
|
||||||
|
Args:
|
||||||
|
dt_entries: List of DtEntry object to be added.
|
||||||
|
Returns:
|
||||||
|
A buffer containing all DT entries.
|
||||||
|
Raises:
|
||||||
|
ValueError: if the list of DT entries is empty or if a list of DT entries
|
||||||
|
has already been added to the DTBO.
|
||||||
|
"""
|
||||||
|
if not dt_entries:
|
||||||
|
raise ValueError('Attempted to add empty list of DT entries')
|
||||||
|
if self.__dt_entries:
|
||||||
|
raise ValueError('DTBO DT entries can be added only once')
|
||||||
|
dt_entry_count = len(dt_entries)
|
||||||
|
dt_offset = (self.header_size +
|
||||||
|
dt_entry_count * self.dt_entry_size)
|
||||||
|
dt_entry_buf = b""
|
||||||
|
for dt_entry in dt_entries:
|
||||||
|
if not isinstance(dt_entry, DtEntry):
|
||||||
|
raise ValueError('Adding invalid DT entry object to DTBO')
|
||||||
|
entry = self._find_dt_entry_with_same_file(dt_entry)
|
||||||
|
dt_entry_compression_info = dt_entry.compression_info()
|
||||||
|
if entry and (entry.compression_info() == dt_entry_compression_info):
|
||||||
|
dt_entry.dt_offset = entry.dt_offset
|
||||||
|
dt_entry.size = entry.size
|
||||||
|
else:
|
||||||
|
dt_entry.dt_offset = dt_offset
|
||||||
|
compressed_entry, dt_entry.size = self.compress_dt_entry(dt_entry_compression_info,
|
||||||
|
dt_entry.dt_file)
|
||||||
|
dt_entry_buf += compressed_entry
|
||||||
|
dt_offset += dt_entry.size
|
||||||
|
self.total_size += dt_entry.size
|
||||||
|
self.__dt_entries.append(dt_entry)
|
||||||
|
self.dt_entry_count += 1
|
||||||
|
self.__metadata_size += self.dt_entry_size
|
||||||
|
self.total_size += self.dt_entry_size
|
||||||
|
return dt_entry_buf
|
||||||
|
def extract_dt_file(self, idx, fout, decompress):
|
||||||
|
"""Extract DT Image files embedded in the DTBO file.
|
||||||
|
Extracts Device Tree blob image file at given index into a file handle.
|
||||||
|
Args:
|
||||||
|
idx: Index of the DT entry in the DTBO file.
|
||||||
|
fout: File handle where the DTB at index idx to be extracted into.
|
||||||
|
decompress: If a DT entry is compressed, decompress it before writing
|
||||||
|
it to the file handle.
|
||||||
|
Raises:
|
||||||
|
ValueError: if invalid DT entry index or compression format is detected.
|
||||||
|
"""
|
||||||
|
if idx > self.dt_entry_count:
|
||||||
|
raise ValueError('Invalid index %d of DtEntry' % idx)
|
||||||
|
size = self.dt_entries[idx].size
|
||||||
|
offset = self.dt_entries[idx].dt_offset
|
||||||
|
self.__file.seek(offset, 0)
|
||||||
|
fout.seek(0)
|
||||||
|
compression_format = self.dt_entries[idx].compression_info()
|
||||||
|
if decompress and compression_format:
|
||||||
|
if (compression_format == CompressionFormat.ZLIB_COMPRESSION or
|
||||||
|
compression_format == CompressionFormat.GZIP_COMPRESSION):
|
||||||
|
fout.write(zlib.decompress(self.__file.read(size), self._ZLIB_DECOMPRESSION_WBITS))
|
||||||
|
else:
|
||||||
|
raise ValueError("Unknown compression format detected")
|
||||||
|
else:
|
||||||
|
fout.write(self.__file.read(size))
|
||||||
|
def commit(self, dt_entry_buf):
    """Write the staged DTBO object out to its backing file.

    Writes the updated metadata followed by all DT entries using the file
    handle stored on this object. Beyond checking that a file handle and
    staged DT entries exist, no validation is performed.

    Args:
        dt_entry_buf: Buffer containing all (possibly compressed) DT entries.

    Raises:
        ValueError: if there is no output file or no staged DT entries.
    """
    if not self.__file:
        raise ValueError('No file given to write to.')
    if not self.__dt_entries:
        raise ValueError('No DT image files to embed into DTBO image given.')

    self._update_metadata()

    # Metadata first (from offset 0), then the concatenated entries.
    self.__file.seek(0)
    self.__file.write(self.__metadata)
    self.__file.write(dt_entry_buf)
    self.__file.flush()
|
||||||
|
def parse_dt_entry(global_args, arglist):
    """Parse the command-line arguments describing one DT entry file.

    Args:
        global_args: Namespace holding the global default values
            (global_id, global_rev, ...) for the DtEntry attributes.
        arglist: Command-line argument list for this DtEntry.

    Returns:
        A Namespace containing all values needed to instantiate a DtEntry.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('dt_file', nargs='?',
                        type=argparse.FileType('rb'),
                        default=None)
    # Every per-entry option falls back to the corresponding global default.
    for option in ('id', 'rev', 'flags', 'custom0', 'custom1', 'custom2',
                   'custom3'):
        parser.add_argument('--' + option, type=str, dest=option,
                            action='store',
                            default=getattr(global_args, 'global_' + option))
    return parser.parse_args(arglist)
|
||||||
|
def parse_dt_entries(global_args, arg_list):
    """Parse all DT entries given on the command line.

    Args:
        global_args: Namespace with the default global values for _id,
            _rev and the customX fields.
        arg_list: Remainder of the command line after the global DTBO
            creation options have been parsed.

    Returns:
        A list of DtEntry objects built from the command line.

    Raises:
        ValueError: if no DT image file was provided.
    """
    # Positional arguments (anything not starting with '--') are DT image
    # file paths; each one opens a new entry's argument group.
    img_file_idx = [pos for pos, arg in enumerate(arg_list)
                    if not arg.startswith('--')]
    if not img_file_idx:
        raise ValueError('Input DT images must be provided')

    dt_entries = []
    total_images = len(img_file_idx)
    for num, start_idx in enumerate(img_file_idx):
        # Slice from this image path up to (but excluding) the next one.
        if num == total_images - 1:
            argv = arg_list[start_idx:]
        else:
            argv = arg_list[start_idx:img_file_idx[num + 1]]
        args = parse_dt_entry(global_args, argv)
        params = vars(args)
        params['version'] = global_args.version
        params['dt_offset'] = 0
        params['dt_size'] = os.fstat(params['dt_file'].fileno()).st_size
        dt_entries.append(DtEntry(**params))

    return dt_entries
|
||||||
|
def parse_config_option(line, is_global, dt_keys, global_key_types):
    """Parse a single key=value line from the configuration file.

    Args:
        line: String containing the key=value line from the file.
        is_global: True when parsing a global option rather than a DT
            entry specific one.
        dt_keys: Tuple of all valid DT entry (and shared global) option
            keys in the configuration file.
        global_key_types: Dict mapping exclusively-global option keys to
            their types; int-typed values are converted.

    Returns:
        A (key, value) tuple for the parsed option.

    Raises:
        ValueError: if the line has no '=' or the key is unknown.
    """
    if '=' not in line:
        raise ValueError('Invalid line (%s) in configuration file' % line)

    key, value = (token.strip() for token in line.split('='))
    if is_global and key in global_key_types:
        # Convert typed global options (e.g. page_size) to their type.
        if global_key_types[key] is int:
            value = int(value)
    elif key not in dt_keys:
        raise ValueError('Invalid option (%s) in configuration file' % key)
    return key, value
|
||||||
|
def parse_config_file(fin, dt_keys, global_key_types):
    """Parse the configuration file used to create a DTBO image.

    Args:
        fin: File handle of the configuration file.
        dt_keys: Tuple of all valid DT entry (and shared global) option
            keys in the configuration file.
        global_key_types: Dict of exclusively-global option keys and their
            corresponding types.

    Returns:
        (global_args, dt_args): a dict of global arguments and a list of
        per-DT-entry dicts, in the format
            global_args: {'id': <value>, 'rev': <value>, ...}
            dt_args: [{'filename': 'dt_file_name', 'id': <value>, ...},
                      {'filename': 'dt_file_name2', 'id': <value2>, ...}]
    """
    # Seed every shared key, plus the exclusively-global defaults.
    global_args = {key: '0' for key in dt_keys}
    global_args['dt_type'] = 'dtb'
    global_args['page_size'] = 2048
    global_args['version'] = 0

    dt_args = []
    found_dt_entry = False
    count = -1
    for raw_line in fin:
        line = raw_line.rstrip()
        # Skip full-line comments and strip trailing ones.
        if line.lstrip().startswith('#'):
            continue
        comment_idx = line.find('#')
        if comment_idx != -1:
            line = line[0:comment_idx]
        if not line or line.isspace():
            continue

        if line.startswith((' ', '\t')) and not found_dt_entry:
            # Indented lines before the first DT entry are global options.
            key, value = parse_config_option(line, True, dt_keys,
                                             global_key_types)
            global_args[key] = value
        elif '=' in line:
            # Option belonging to the most recently started DT entry.
            key, value = parse_config_option(line, False, dt_keys,
                                             global_key_types)
            dt_args[-1][key] = value
        else:
            # A bare filename starts a new DT entry.
            found_dt_entry = True
            count += 1
            dt_args.append({'filename': line.strip()})

    return global_args, dt_args
|
||||||
|
def parse_create_args(arg_list):
    """Parse command-line arguments for the 'create' sub-command.

    Args:
        arg_list: All command-line arguments except the output file name.

    Returns:
        (args, remainder): the parsed global options Namespace and the
        remaining arguments describing the individual DT entries.
    """
    # Global options come first; the first non-'--' argument starts the
    # per-image DT entry argument list.
    image_arg_index = 0
    for arg in arg_list:
        if not arg.startswith('--'):
            break
        image_arg_index += 1

    argv = arg_list[:image_arg_index]
    remainder = arg_list[image_arg_index:]

    parser = argparse.ArgumentParser(prog='create', add_help=False)
    parser.add_argument('--dt_type', type=str, dest='dt_type',
                        action='store', default='dtb')
    parser.add_argument('--page_size', type=int, dest='page_size',
                        action='store', default=2048)
    parser.add_argument('--version', type=int, dest='version',
                        action='store', default=0)
    # Defaults for the per-entry attributes, overridable per DT image.
    for option in ('id', 'rev', 'flags', 'custom0', 'custom1', 'custom2',
                   'custom3'):
        parser.add_argument('--' + option, type=str,
                            dest='global_' + option,
                            action='store', default='0')
    args = parser.parse_args(argv)
    return args, remainder
|
||||||
|
def parse_dump_cmd_args(arglist):
    """Parse command-line arguments for the 'dump' sub-command.

    Args:
        arglist: List of all command-line arguments including the output
            file name if it exists.

    Returns:
        A Namespace object of parsed arguments.
    """
    parser = argparse.ArgumentParser(prog='dump')
    # Metadata goes to --output (stdout by default); DT blobs are dumped
    # to <dtfilename>.0, <dtfilename>.1, ... when --dtb is given.
    parser.add_argument('--output', '-o', nargs='?',
                        type=argparse.FileType('w'),
                        dest='outfile', default=stdout)
    parser.add_argument('--dtb', '-b', nargs='?', type=str,
                        dest='dtfilename')
    parser.add_argument('--decompress', action='store_true',
                        dest='decompress')
    return parser.parse_args(arglist)
|
||||||
|
def parse_config_create_cmd_args(arglist):
    """Parse command-line arguments for the 'cfg_create' sub-command.

    Args:
        arglist: A list of all command-line arguments including the
            mandatory input configuration file name.

    Returns:
        A Namespace object of parsed arguments.
    """
    parser = argparse.ArgumentParser(prog='cfg_create')
    parser.add_argument('conf_file', nargs='?',
                        type=argparse.FileType('r'),
                        default=None)
    # dtb files with relative names are searched for under --dtb-dir.
    parser.add_argument('--dtb-dir', '-d', nargs='?', type=str,
                        dest='dtbdir', default=os.getcwd())
    return parser.parse_args(arglist)
|
||||||
|
def create_dtbo_image(fout, argv):
    """Create a Device Tree Blob Overlay (DTBO) image.

    Args:
        fout: Output file handle the DTBO image is written to.
        argv: List of command-line arguments.

    Raises:
        ValueError: if no DT image files were supplied.
    """
    global_args, remainder = parse_create_args(argv)
    if not remainder:
        raise ValueError('List of dtimages to add to DTBO not provided')

    dt_entries = parse_dt_entries(global_args, remainder)
    dtbo = Dtbo(fout, global_args.dt_type, global_args.page_size,
                global_args.version)
    # Stage all entries, then flush metadata + entries to disk.
    dtbo.commit(dtbo.add_dt_entries(dt_entries))
    fout.close()
|
||||||
|
def dump_dtbo_image(fin, argv):
    """Dump a DTBO image.

    Writes the Device Tree Blob Overlay metadata to the selected output
    and, when --dtb was given, extracts each embedded device tree blob
    into its own file.

    Args:
        fin: Input DTBO image file handle.
        argv: List of command-line arguments.
    """
    dtbo = Dtbo(fin)
    args = parse_dump_cmd_args(argv)
    if args.dtfilename:
        # One output file per entry: <dtfilename>.0, <dtfilename>.1, ...
        for idx in range(len(dtbo.dt_entries)):
            with open('{:s}.{:d}'.format(args.dtfilename, idx), 'wb') as fout:
                dtbo.extract_dt_file(idx, fout, args.decompress)
    args.outfile.write(str(dtbo) + '\n')
    args.outfile.close()
|
||||||
|
def create_dtbo_image_from_config(fout, argv):
    """Create a DTBO image from a configuration file.

    Args:
        fout: Output file handle the DTBO image is written to.
        argv: List of command-line arguments.

    Raises:
        ValueError: if no configuration file was supplied.
    """
    args = parse_config_create_cmd_args(argv)
    if not args.conf_file:
        raise ValueError('Configuration file must be provided')

    _DT_KEYS = ('id', 'rev', 'flags', 'custom0', 'custom1', 'custom2',
                'custom3')
    _GLOBAL_KEY_TYPES = {'dt_type': str, 'page_size': int, 'version': int}

    global_args, dt_args = parse_config_file(args.conf_file,
                                             _DT_KEYS, _GLOBAL_KEY_TYPES)
    version = global_args['version']

    dt_entries = []
    for dt_arg in dt_args:
        filepath = dt_arg['filename']
        if not os.path.isabs(filepath):
            # Relative name: search for a matching file under --dtb-dir.
            # NOTE(review): if several files match, the last one found
            # wins; if none match, the open() below fails — confirm this
            # is the intended behavior.
            for root, dirnames, filenames in os.walk(args.dtbdir):
                for filename in fnmatch.filter(filenames,
                                               os.path.basename(filepath)):
                    filepath = os.path.join(root, filename)
        params = {'version': version}
        params['dt_file'] = open(filepath, 'rb')
        params['dt_offset'] = 0
        params['dt_size'] = os.fstat(params['dt_file'].fileno()).st_size
        # Per-entry options fall back to the global values.
        for key in _DT_KEYS:
            params[key] = dt_arg.get(key, global_args[key])
        dt_entries.append(DtEntry(**params))

    # Create and write DTBO file.
    dtbo = Dtbo(fout, global_args['dt_type'], global_args['page_size'],
                version)
    dt_entry_buf = dtbo.add_dt_entries(dt_entries)
    dtbo.commit(dt_entry_buf)
    fout.close()
|
||||||
|
def print_default_usage(progname):
    """Print the program's default help text.

    Args:
        progname: This program's name.
    """
    lines = [
        ' ' + progname + ' help all',
        ' ' + progname + ' help <command>\n',
        ' commands:',
        ' help, dump, create, cfg_create',
    ]
    print('\n'.join(lines))
|
||||||
|
def print_dump_usage(progname):
    """Print usage for the 'dump' sub-command.

    Args:
        progname: This program's name.
    """
    lines = [
        ' ' + progname + ' dump <image_file> (<option>...)\n',
        ' options:',
        ' -o, --output <filename> Output file name.',
        ' Default is output to stdout.',
        ' -b, --dtb <filename> Dump dtb/dtbo files from image.',
        ' Will output to <filename>.0, <filename>.1, etc.',
    ]
    print('\n'.join(lines))
|
||||||
|
def print_create_usage(progname):
    """Print usage for the 'create' sub-command.

    Args:
        progname: This program's name.
    """
    sb = []
    sb.append(' ' + progname + ' create <image_file> (<global_option>...) (<dtb_file> (<entry_option>...) ...)\n')
    sb.append(' global_options:')
    sb.append(' --dt_type=<type> Device Tree Type (dtb|acpi). Default: dtb')
    sb.append(' --page_size=<number> Page size. Default: 2048')
    sb.append(' --version=<number> DTBO/ACPIO version. Default: 0')
    sb.append(' --id=<number> The default value to set property id in dt_table_entry. Default: 0')
    sb.append(' --rev=<number>')
    sb.append(' --flags=<number>')
    sb.append(' --custom0=<number>')
    sb.append(' --custom1=<number>')
    # The stray '\n' previously appended to the custom2 entry produced a
    # spurious blank line in the middle of the option list; only the last
    # entry keeps a trailing '\n' to separate the list from the notes.
    sb.append(' --custom2=<number>')
    sb.append(' --custom3=<number>\n')
    sb.append(' The value could be a number or a DT node path.')
    sb.append(' <number> could be a 32-bits digit or hex value, ex. 68000, 0x6800.')
    sb.append(' <path> format is <full_node_path>:<property_name>, ex. /board/:id,')
    sb.append(' will read the value in given FTB file with the path.')
    print('\n'.join(sb))
|
||||||
|
def print_cfg_create_usage(progname):
    """Print usage for the 'cfg_create' sub-command.

    Args:
        progname: This program's name.
    """
    lines = [
        ' ' + progname + ' cfg_create <image_file> <config_file> (<option>...)\n',
        ' options:',
        ' -d, --dtb-dir <dir> The path to load dtb files.',
        ' Default is load from the current path.',
    ]
    print('\n'.join(lines))
|
||||||
|
def print_usage(cmd, _):
    """Print usage for this program or one of its sub-commands.

    Args:
        cmd: The string sub-command for which help (usage) is requested.
            Empty prints the default usage; 'all' prints the usage of
            every sub-command.
    """
    prog_name = os.path.basename(__file__)
    if not cmd:
        print_default_usage(prog_name)
        return

    HelpCommand = namedtuple('HelpCommand', 'help_cmd, help_func')
    help_commands = (HelpCommand('dump', print_dump_usage),
                     HelpCommand('create', print_create_usage),
                     HelpCommand('cfg_create', print_cfg_create_usage),
                     )

    if cmd == 'all':
        print_default_usage(prog_name)

    for help_cmd, help_func in help_commands:
        if cmd == 'all' or cmd == help_cmd:
            help_func(prog_name)
            if cmd != 'all':
                return

    # 'all' has now printed every sub-command's help; returning here keeps
    # it from falling through to the unsupported-command message below.
    if cmd == 'all':
        return

    print('Unsupported help command: %s' % cmd, end='\n\n')
    print_default_usage(prog_name)
    return
|
||||||
|
def main():
    """Main entry point for mkdtboimg."""
    parser = argparse.ArgumentParser()
    subparser = parser.add_subparsers(title='subcommand',
                                      description='Valid subcommands')
    # Without a required subcommand, running with no arguments would fail
    # later with an AttributeError on subcmd.func instead of a usage error.
    subparser.required = True
    subparser.dest = 'subcommand'

    create_parser = subparser.add_parser('create', add_help=False)
    create_parser.add_argument('argfile', nargs='?',
                               action='store', help='Output File',
                               type=argparse.FileType('wb'))
    create_parser.set_defaults(func=create_dtbo_image)

    config_parser = subparser.add_parser('cfg_create', add_help=False)
    config_parser.add_argument('argfile', nargs='?',
                               action='store',
                               type=argparse.FileType('wb'))
    config_parser.set_defaults(func=create_dtbo_image_from_config)

    dump_parser = subparser.add_parser('dump', add_help=False)
    dump_parser.add_argument('argfile', nargs='?',
                             action='store',
                             type=argparse.FileType('rb'))
    dump_parser.set_defaults(func=dump_dtbo_image)

    help_parser = subparser.add_parser('help', add_help=False)
    help_parser.add_argument('argfile', nargs='?', action='store')
    help_parser.set_defaults(func=print_usage)

    (subcmd, subcmd_args) = parser.parse_known_args()
    subcmd.func(subcmd.argfile, subcmd_args)
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
47
kernel_build/mkbootimg/gki/Android.bp
Executable file
47
kernel_build/mkbootimg/gki/Android.bp
Executable file
|
@ -0,0 +1,47 @@
|
||||||
|
// Copyright (C) 2022 The Android Open Source Project
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package {
    default_applicable_licenses: ["Android-Apache-2.0"],
}

// Host-side unit tests for certify_bootimg; depends on avbtool plus the
// mkbootimg/unpack_bootimg tools and the fixtures under testdata/.
python_test_host {
    name: "certify_bootimg_test",
    defaults: ["mkbootimg_defaults"],
    main: "certify_bootimg_test.py",
    srcs: [
        "certify_bootimg_test.py",
    ],
    data: [
        ":avbtool",
        ":certify_bootimg",
        ":mkbootimg",
        ":unpack_bootimg",
        "testdata/*",
    ],
    test_options: {
        unit_test: true,
    },
}

// Host tool that emits a GKI certificate; invokes avbtool at runtime.
python_binary_host {
    name: "generate_gki_certificate",
    defaults: ["mkbootimg_defaults"],
    srcs: [
        "generate_gki_certificate.py",
    ],
    required: [
        "avbtool",
    ],
}
|
98
kernel_build/mkbootimg/gki/boot_signature_info.sh
Executable file
98
kernel_build/mkbootimg/gki/boot_signature_info.sh
Executable file
|
@ -0,0 +1,98 @@
|
||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Copyright (C) 2022 The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
#
|
||||||
|
# Dump boot signature info of a GKI boot image.
|
||||||
|
#
|
||||||
|
|
||||||
|
# Abort on unhandled errors; errtrace lets the ERR trap fire in functions.
set -eo errtrace

# Print an error message to stderr and abort the script with status 1.
die() {
  echo "ERROR:" "${@}" >&2
  exit 1
}
|
||||||
|
|
||||||
|
# Scratch directory; removed by exit_handler when the script terminates.
TEMP_DIR="$(mktemp -d)"
readonly TEMP_DIR

# Remove the scratch directory and propagate the script's exit status.
exit_handler() {
  readonly EXIT_CODE="$?"
  rm -rf "${TEMP_DIR}" ||:
  exit "${EXIT_CODE}"
}

trap exit_handler EXIT
trap 'die "line ${LINENO}, ${FUNCNAME:-<main>}(): \"${BASH_COMMAND}\" returned \"$?\"" ' ERR
|
||||||
|
|
||||||
|
# Print the value that follows option $1 in the remaining arguments.
# Usage: get_arg --flag "${args[@]}"
# Outputs the first value found, or nothing when the flag is absent.
get_arg() {
  local arg="$1"
  shift
  while [[ "$#" -gt 0 ]]; do
    if [[ "$1" == "${arg}" ]]; then
      shift
      # printf instead of echo: option-like values such as '-n' or '-e'
      # would be swallowed or mangled by echo.
      printf '%s\n' "$1"
      return
    fi
    shift
  done
}
|
||||||
|
|
||||||
|
readonly VBMETA_IMAGE="${TEMP_DIR}/boot.boot_signature"
readonly VBMETA_IMAGE_TEMP="${VBMETA_IMAGE}.temp"
readonly VBMETA_INFO="${VBMETA_IMAGE}.info"
readonly BOOT_IMAGE="${TEMP_DIR}/boot.img"
readonly BOOT_IMAGE_DIR="${TEMP_DIR}/boot.unpack_dir"
readonly BOOT_IMAGE_ARGS="${TEMP_DIR}/boot.mkbootimg_args"
readonly BOOT_SIGNATURE_SIZE=$(( 16 << 10 ))  # 16 KiB

[[ -f "$1" ]] ||
  die "expected one input image"
cp "$1" "${BOOT_IMAGE}"

# This could fail if there already is no AVB footer.
avbtool erase_footer --image "${BOOT_IMAGE}" 2>/dev/null ||:

# Dump the mkbootimg arguments NUL-delimited so values with spaces survive.
unpack_bootimg --boot_img "${BOOT_IMAGE}" --out "${BOOT_IMAGE_DIR}" \
  --format=mkbootimg -0 > "${BOOT_IMAGE_ARGS}"

declare -a boot_args=()
while IFS= read -r -d '' ARG; do
  boot_args+=("${ARG}")
done < "${BOOT_IMAGE_ARGS}"

BOOT_IMAGE_VERSION="$(get_arg --header_version "${boot_args[@]}")"
if [[ "${BOOT_IMAGE_VERSION}" -ge 4 ]] && [[ -f "${BOOT_IMAGE_DIR}/boot_signature" ]]; then
  cp "${BOOT_IMAGE_DIR}/boot_signature" "${VBMETA_IMAGE}"
else
  # Older headers: the signature is the trailing 16 KiB of the image.
  tail -c "${BOOT_SIGNATURE_SIZE}" "${BOOT_IMAGE}" > "${VBMETA_IMAGE}"
fi

# Keep carving out vbmeta image from the boot signature until we fail or EOF.
# Failing is fine because there could be padding trailing the boot signature.
while avbtool info_image --image "${VBMETA_IMAGE}" --output "${VBMETA_INFO}" 2>/dev/null; do
  cat "${VBMETA_INFO}"
  echo

  # Sizes of the three vbmeta blocks, parsed straight from the info dump
  # (single awk per field instead of the previous cat|grep|awk chains).
  declare -i H A X
  H="$(awk '/Header Block:/ {print $3}' "${VBMETA_INFO}")"
  A="$(awk '/Authentication Block:/ {print $3}' "${VBMETA_INFO}")"
  X="$(awk '/Auxiliary Block:/ {print $3}' "${VBMETA_INFO}")"
  vbmeta_size="$(( H + A + X ))"

  # Drop the vbmeta image just printed and continue with the remainder.
  tail -c "+$(( vbmeta_size + 1 ))" "${VBMETA_IMAGE}" > "${VBMETA_IMAGE_TEMP}"
  cp "${VBMETA_IMAGE_TEMP}" "${VBMETA_IMAGE}"
done
|
310
kernel_build/mkbootimg/gki/certify_bootimg.py
Executable file
310
kernel_build/mkbootimg/gki/certify_bootimg.py
Executable file
|
@ -0,0 +1,310 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright 2022, The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
"""Certify a GKI boot image by generating and appending its boot_signature."""
|
||||||
|
|
||||||
|
from argparse import ArgumentParser
|
||||||
|
import glob
|
||||||
|
import os
|
||||||
|
import shlex
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
from gki.generate_gki_certificate import generate_gki_certificate
|
||||||
|
from unpack_bootimg import unpack_bootimg
|
||||||
|
|
||||||
|
BOOT_SIGNATURE_SIZE = 16 * 1024
|
||||||
|
|
||||||
|
|
||||||
|
def get_kernel(boot_img):
    """Extract the kernel image from |boot_img| and return its bytes."""
    with tempfile.TemporaryDirectory() as unpack_dir:
        unpack_bootimg(boot_img, unpack_dir)
        kernel_path = os.path.join(unpack_dir, 'kernel')
        with open(kernel_path, 'rb') as kernel:
            kernel_bytes = kernel.read()
    # An empty kernel means the unpack step went wrong.
    assert len(kernel_bytes) > 0
    return kernel_bytes
|
||||||
|
|
||||||
|
|
||||||
|
def add_certificate(boot_img, algorithm, key, extra_args):
    """Append GKI certificates to the end of the boot image.

    Appends two certificates to |boot_img|: the 'boot' certificate
    covering the entire image and the 'generic_kernel' certificate
    covering just the kernel inside it. The combined signature blob is
    zero-padded up to BOOT_SIGNATURE_SIZE bytes.

    Raises:
        ValueError: if the certificates exceed BOOT_SIGNATURE_SIZE.
    """

    def generate_certificate(image, certificate_name):
        """Generate a certificate for |image| and return its bytes."""
        with tempfile.NamedTemporaryFile() as output_certificate:
            generate_gki_certificate(
                image=image, avbtool='avbtool', name=certificate_name,
                algorithm=algorithm, key=key, salt='d00df00d',
                additional_avb_args=extra_args, output=output_certificate.name)
            # seek(offset, whence): rewind to the start of the file. The
            # previous seek(os.SEEK_SET, 0) had the arguments swapped and
            # only worked because os.SEEK_SET happens to equal 0.
            output_certificate.seek(0, os.SEEK_SET)
            return output_certificate.read()

    boot_signature_bytes = b''
    boot_signature_bytes += generate_certificate(boot_img, 'boot')

    with tempfile.NamedTemporaryFile() as kernel_img:
        kernel_img.write(get_kernel(boot_img))
        kernel_img.flush()
        boot_signature_bytes += generate_certificate(kernel_img.name,
                                                     'generic_kernel')

    if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
        raise ValueError(
            f'boot_signature size must be <= {BOOT_SIGNATURE_SIZE}')
    boot_signature_bytes += (
        b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes)))
    assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE

    with open(boot_img, 'ab') as f:
        f.write(boot_signature_bytes)
|
||||||
|
|
||||||
|
|
||||||
|
def erase_certificate_and_avb_footer(boot_img):
    """Erase any existing boot certificate and AVB footer from |boot_img|.

    A boot image might already carry a certificate and/or an AVB footer;
    this strips both so fresh metadata can be appended.
    """
    # Try to erase the AVB footer first; it may or may not exist.
    avbtool_cmd = ['avbtool', 'erase_footer', '--image', boot_img]
    subprocess.run(avbtool_cmd, check=False, stderr=subprocess.DEVNULL)
    assert os.path.getsize(boot_img) > 0

    # Image too small to hold a signature: nothing left to erase.
    if os.path.getsize(boot_img) <= BOOT_SIGNATURE_SIZE:
        return

    # Read the last 16K so we can check whether it is a boot signature.
    with open(boot_img, 'rb') as image:
        image.seek(-BOOT_SIGNATURE_SIZE, os.SEEK_END)
        boot_signature = image.read(BOOT_SIGNATURE_SIZE)
        assert len(boot_signature) == BOOT_SIGNATURE_SIZE

    # A signature is present exactly when avbtool can parse that tail.
    with tempfile.NamedTemporaryFile() as signature_tmpfile:
        signature_tmpfile.write(boot_signature)
        signature_tmpfile.flush()
        avbtool_info_cmd = [
            'avbtool', 'info_image', '--image', signature_tmpfile.name]
        result = subprocess.run(avbtool_info_cmd, check=False,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
        has_boot_signature = (result.returncode == 0)

    if has_boot_signature:
        new_file_size = os.path.getsize(boot_img) - BOOT_SIGNATURE_SIZE
        os.truncate(boot_img, new_file_size)

    assert os.path.getsize(boot_img) > 0
|
||||||
|
|
||||||
|
|
||||||
|
def get_avb_image_size(image):
    """Return the size of |image| if it has an AVB footer, otherwise 0."""
    avbtool_info_cmd = ['avbtool', 'info_image', '--image', image]
    result = subprocess.run(avbtool_info_cmd, check=False,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
    # avbtool succeeds only when the image carries AVB metadata.
    return os.path.getsize(image) if result.returncode == 0 else 0
|
||||||
|
|
||||||
|
|
||||||
|
def add_avb_footer(image, partition_size, extra_footer_args):
    """Append an AVB hash footer for partition 'boot' to |image|."""
    avbtool_cmd = ['avbtool', 'add_hash_footer', '--image', image,
                   '--partition_name', 'boot']
    # Fixed partition size when given; otherwise let avbtool size it.
    if partition_size:
        avbtool_cmd += ['--partition_size', str(partition_size)]
    else:
        avbtool_cmd += ['--dynamic_partition_size']
    avbtool_cmd.extend(extra_footer_args)
    subprocess.check_call(avbtool_cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def load_dict_from_file(path):
    """Parse |path| as key=value lines and return them as a dict.

    Blank lines and '#' comment lines are ignored, as are lines without
    an '='.  Only the first '=' splits key from value, so values may
    themselves contain '='.
    """
    result = {}
    with open(path, 'r', encoding='utf-8') as config:
        for raw_line in config:
            entry = raw_line.strip()
            if not entry or entry.startswith('#'):
                continue
            if '=' not in entry:
                continue
            key, _, value = entry.partition('=')
            result[key] = value
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def load_gki_info_file(gki_info_file, extra_args, extra_footer_args):
    """Merge avbtool arguments declared in a gki-info.txt into the lists.

    Args:
        gki_info_file: path to a gki-info.txt.
        extra_args: list extended in place with arguments forwarded to
            avbtool when creating the gki certificate.
        extra_footer_args: list extended in place with arguments
            forwarded to avbtool when adding the avb footer.
    """
    info = load_dict_from_file(gki_info_file)
    for key, target in (
            ('certify_bootimg_extra_args', extra_args),
            ('certify_bootimg_extra_footer_args', extra_footer_args)):
        if key in info:
            # Values are shell-style strings; split them into argv words.
            target.extend(shlex.split(info[key]))
|
||||||
|
|
||||||
|
|
||||||
|
def get_archive_name_and_format_for_shutil(path):
    """Split |path| into (base name, format) for shutil.make_archive().

    e.g., returns ('/path/to/boot-img', 'gztar') if |path| is
    '/path/to/boot-img.tar.gz'.

    Raises:
        ValueError: when no registered unpack format matches |path|.
    """
    for fmt_name, fmt_extensions, _ in shutil.get_unpack_formats():
        matched = next(
            (ext for ext in fmt_extensions if path.endswith(ext)), None)
        if matched is not None:
            return path[:-len(matched)], fmt_name

    raise ValueError(f"Unsupported archive format: '{path}'")
|
||||||
|
|
||||||
|
|
||||||
|
def parse_cmdline():
    """Parse command-line options for certify_bootimg.

    Returns:
        An argparse.Namespace with extra_args/extra_footer_args already
        flattened from repeated shell-style strings into argv word lists,
        and gki-info.txt properties (if any) merged in.
    """
    parser = ArgumentParser(add_help=True)

    # Required args: exactly one input form (single image or archive).
    input_group = parser.add_mutually_exclusive_group(required=True)
    input_group.add_argument(
        '--boot_img', help='path to the boot image to certify')
    input_group.add_argument(
        '--boot_img_archive', help='path to the boot images archive to certify')

    parser.add_argument('--algorithm', required=True,
                        help='signing algorithm for the certificate')
    parser.add_argument('--key', required=True,
                        help='path to the RSA private key')
    # BUGFIX: the two adjacent string literals previously concatenated to
    # "additional'properties" with no separating space.
    parser.add_argument('--gki_info',
                        help='path to a gki-info.txt to append additional '
                             'properties into the boot signature')
    parser.add_argument('-o', '--output', required=True,
                        help='output file name')

    # Optional args: may be repeated; each value is a shell-style string.
    parser.add_argument('--extra_args', default=[], action='append',
                        help='extra arguments to be forwarded to avbtool')
    parser.add_argument('--extra_footer_args', default=[], action='append',
                        help='extra arguments for adding the avb footer')

    args = parser.parse_args()

    # BUGFIX: the message previously referred to a nonexistent option
    # '--boot_image_archive'; the real flag is '--boot_img_archive'.
    if args.gki_info and args.boot_img_archive:
        parser.error('--gki_info cannot be used with --boot_img_archive. '
                     'The gki_info file should be included in the archive.')

    # Flatten repeated shell-style strings into a single argv word list.
    extra_args = []
    for a in args.extra_args:
        extra_args.extend(shlex.split(a))
    args.extra_args = extra_args

    extra_footer_args = []
    for a in args.extra_footer_args:
        extra_footer_args.extend(shlex.split(a))
    args.extra_footer_args = extra_footer_args

    # Merge additional avbtool arguments declared in the gki-info file.
    if args.gki_info:
        load_gki_info_file(args.gki_info,
                           args.extra_args,
                           args.extra_footer_args)

    return args
|
||||||
|
|
||||||
|
|
||||||
|
def certify_bootimg(boot_img, output_img, algorithm, key, extra_args,
                    extra_footer_args):
    """Certify a GKI boot image by generating and appending a boot_signature.

    Works on a scratch copy, so |boot_img| itself is never modified; the
    certified result is written to |output_img| (which may be the same
    path as |boot_img|).
    """
    with tempfile.TemporaryDirectory() as work_dir:
        scratch_img = os.path.join(work_dir, 'boot.tmp')
        shutil.copy2(boot_img, scratch_img)

        # Drop any stale certificate/footer before re-signing.
        erase_certificate_and_avb_footer(scratch_img)
        add_certificate(scratch_img, algorithm, key, extra_args)

        # Preserve the partition size recorded by the original AVB
        # footer, if any (0 requests dynamic sizing).
        add_avb_footer(scratch_img, get_avb_image_size(boot_img),
                       extra_footer_args)

        # All done: publish the scratch image as the final output.
        shutil.copy2(scratch_img, output_img)
|
||||||
|
|
||||||
|
|
||||||
|
def certify_bootimg_archive(boot_img_archive, output_archive,
                            algorithm, key, extra_args, extra_footer_args):
    """Similar to certify_bootimg(), but for an archive of boot images.

    Unpacks |boot_img_archive| into a scratch directory, certifies every
    boot*.img inside it in place, then repacks the directory into
    |output_archive|.  A gki-info.txt found in the archive contributes
    additional avbtool arguments (extends |extra_args| /
    |extra_footer_args| in place).
    """
    with tempfile.TemporaryDirectory() as unpack_dir:
        shutil.unpack_archive(boot_img_archive, unpack_dir)

        # Optional per-archive avbtool argument overrides.
        gki_info_file = os.path.join(unpack_dir, 'gki-info.txt')
        if os.path.exists(gki_info_file):
            load_gki_info_file(gki_info_file, extra_args, extra_footer_args)

        # Certify each boot image in place inside the unpacked tree.
        for boot_img in glob.glob(os.path.join(unpack_dir, 'boot*.img')):
            print(f'Certifying {os.path.basename(boot_img)} ...')
            certify_bootimg(boot_img=boot_img, output_img=boot_img,
                            algorithm=algorithm, key=key, extra_args=extra_args,
                            extra_footer_args=extra_footer_args)

        print(f'Making certified archive: {output_archive}')
        archive_file_name, archive_format = (
            get_archive_name_and_format_for_shutil(output_archive))
        built_archive = shutil.make_archive(archive_file_name,
                                            archive_format,
                                            unpack_dir)
        # shutil.make_archive() builds *.tar.gz when the |archive_format| is
        # 'gztar'. However, the end user might specify |output_archive| with
        # *.tgz. Renaming *.tar.gz to *.tgz for this case.
        if built_archive != os.path.realpath(output_archive):
            print(f'Renaming {built_archive} -> {output_archive} ...')
            os.rename(built_archive, output_archive)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Parse arguments and certify the boot image (or archive of them)."""
    args = parse_cmdline()
    if args.boot_img_archive:
        certify_bootimg_archive(
            args.boot_img_archive, args.output, args.algorithm, args.key,
            args.extra_args, args.extra_footer_args)
        return
    certify_bootimg(
        args.boot_img, args.output, args.algorithm, args.key,
        args.extra_args, args.extra_footer_args)
|
1134
kernel_build/mkbootimg/gki/certify_bootimg_test.py
Executable file
1134
kernel_build/mkbootimg/gki/certify_bootimg_test.py
Executable file
File diff suppressed because it is too large
Load diff
94
kernel_build/mkbootimg/gki/generate_gki_certificate.py
Executable file
94
kernel_build/mkbootimg/gki/generate_gki_certificate.py
Executable file
|
@ -0,0 +1,94 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright 2021, The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
"""Generate a Generic Boot Image certificate suitable for VTS verification."""
|
||||||
|
|
||||||
|
from argparse import ArgumentParser
|
||||||
|
import shlex
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
|
def generate_gki_certificate(image, avbtool, name, algorithm, key, salt,
                             additional_avb_args, output):
    """Shell out to avbtool to generate a GKI certificate for |image|.

    The certificate (a standalone vbmeta blob) is written to |output|;
    |image| itself is left untouched.
    """
    # avbtool insists on a --partition_size value.  64 MB is only a
    # placeholder: the image is never resized to it because
    # --do_not_append_vbmeta_image is passed as well.
    placeholder_partition_size = 64 * 1024 * 1024
    command = [
        avbtool, 'add_hash_footer',
        '--partition_name', name,
        '--partition_size', str(placeholder_partition_size),
        '--image', image,
        '--algorithm', algorithm,
        '--key', key,
        '--do_not_append_vbmeta_image',
        '--output_vbmeta_image', output,
    ]
    if salt is not None:
        command.extend(['--salt', salt])
    command.extend(additional_avb_args)
    subprocess.check_call(command)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_cmdline():
    """Parse command-line options for generate_gki_certificate.

    Returns:
        An argparse.Namespace; additional_avb_args is flattened from
        repeated shell-style strings into a single argv word list.
    """
    parser = ArgumentParser(add_help=True)

    # Required args.
    parser.add_argument('image', help='path to the image')
    parser.add_argument('-o', '--output', required=True,
                        help='output certificate file name')
    parser.add_argument('--name', required=True,
                        choices=['boot', 'generic_kernel'],
                        help='name of the image to be certified')
    parser.add_argument('--algorithm', required=True,
                        help='AVB signing algorithm')
    parser.add_argument('--key', required=True,
                        help='path to the RSA private key')

    # Optional args.
    parser.add_argument('--avbtool', default='avbtool',
                        help='path to the avbtool executable')
    parser.add_argument('--salt', help='salt to use when computing image hash')
    parser.add_argument('--additional_avb_args', default=[], action='append',
                        help='additional arguments to be forwarded to avbtool')

    args = parser.parse_args()

    # Each --additional_avb_args value is a shell-style string; split
    # them into individual argv words for subprocess use.
    additional_avb_args = []
    for a in args.additional_avb_args:
        additional_avb_args.extend(shlex.split(a))
    args.additional_avb_args = additional_avb_args

    return args
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: parse the command line and emit the GKI certificate."""
    options = parse_cmdline()
    generate_gki_certificate(
        image=options.image,
        avbtool=options.avbtool,
        name=options.name,
        algorithm=options.algorithm,
        key=options.key,
        salt=options.salt,
        additional_avb_args=options.additional_avb_args,
        output=options.output,
    )
|
27
kernel_build/mkbootimg/gki/testdata/testkey_rsa2048.pem
vendored
Executable file
27
kernel_build/mkbootimg/gki/testdata/testkey_rsa2048.pem
vendored
Executable file
|
@ -0,0 +1,27 @@
|
||||||
|
-----BEGIN RSA PRIVATE KEY-----
|
||||||
|
MIIEowIBAAKCAQEAxlVR3TIkouAOvH79vaJTgFhpfvVKQIeVkFRZPVXK/zY0Gvrh
|
||||||
|
4JAqGjJoW/PfrQv5sdD36qtHH3a+G5hLZ6Ni+t/mtfjucxZfuLGC3kmJ1T3XqEKZ
|
||||||
|
gXXI2IR7vVSoImREvDQGEDyJwtHzLANlkbGg0cghVhWZSCAndO8BenalC2v94/rt
|
||||||
|
DfkPekH6dgU3Sf40T0sBSeSY94mOzTaqOR2pfV1rWlLRdWmo33zeHBv52Rlbt0dM
|
||||||
|
uXAureXWiHztkm5GCBC1dgM+CaxNtizNEgC91KcD0xuRCCM2WxH+r1lpszyIJDct
|
||||||
|
YbrFmVEYl/kjQpafhy7Nsk1fqSTyRdriZSYmTQIDAQABAoIBAQC+kJgaCuX8wYAn
|
||||||
|
SXWQ0fmdZlXnMNRpcF0a0pD0SAzGb1RdYBXMaXiqtyhiwc53PPxsCDdNecjayIMd
|
||||||
|
jJVXPTwLhTruOgMS/bp3gcgWwV34UHV4LJXGOGAE+jbS0hbDBMiudOYmj6RmVshp
|
||||||
|
z9G1zZCSQNMXHaWsEYkX59XpzzoB384nRul2QgEtwzUNR9XlpzgtJBLk3SACkvsN
|
||||||
|
mQ/DW8IWHXLg8vLn1LzVJ2e3B16H4MoE2TCHxqfMgr03IDRRJogkenQuQsFhevYT
|
||||||
|
o/mJyHSWavVgzMHG9I5m+eepF4Wyhj1Y4WyKAuMI+9dHAX/h7Lt8XFCQCh5DbkVG
|
||||||
|
zGr34sWBAoGBAOs7n7YZqNaaguovfIdRRsxxZr1yJAyDsr6w3yGImDZYju4c4WY9
|
||||||
|
5esO2kP3FA4p0c7FhQF5oOb1rBuHEPp36cpL4aGeK87caqTfq63WZAujoTZpr9Lp
|
||||||
|
BRbkL7w/xG7jpQ/clpA8sHzHGQs/nelxoOtC7E118FiRgvD/jdhlMyL9AoGBANfX
|
||||||
|
vyoN1pplfT2xR8QOjSZ+Q35S/+SAtMuBnHx3l0qH2bbBjcvM1MNDWjnRDyaYhiRu
|
||||||
|
i+KA7tqfib09+XpB3g5D6Ov7ls/Ldx0S/VcmVWtia2HK8y8iLGtokoBZKQ5AaFX2
|
||||||
|
iQU8+tC4h69GnJYQKqNwgCUzh8+gHX5Y46oDiTmRAoGAYpOx8lX+czB8/Da6MNrW
|
||||||
|
mIZNT8atZLEsDs2ANEVRxDSIcTCZJId7+m1W+nRoaycLTWNowZ1+2ErLvR10+AGY
|
||||||
|
b7Ys79Wg9idYaY9yGn9lnZsMzAiuLeyIvXcSqgjvAKlVWrhOQFOughvNWvFl85Yy
|
||||||
|
oWSCMlPiTLtt7CCsCKsgKuECgYBgdIp6GZsIfkgclKe0hqgvRoeU4TR3gcjJlM9A
|
||||||
|
lBTo+pKhaBectplx9RxR8AnsPobbqwcaHnIfAuKDzjk5mEvKZjClnFXF4HAHbyAF
|
||||||
|
nRzZEy9XkWFhc80T5rRpZO7C7qdxmu2aiKixM3V3L3/0U58qULEDbubHMw9bEhAT
|
||||||
|
PudI8QKBgHEEiMm/hr9T41hbQi/LYanWnlFw1ue+osKuF8bXQuxnnHNuFT/c+9/A
|
||||||
|
vWhgqG6bOEHu+p/IPrYm4tBMYlwsyh4nXCyGgDJLbLIfzKwKAWCtH9LwnyDVhOow
|
||||||
|
GH9shdR+sW3Ew97xef02KAH4VlNANEmBV4sQNqWWvsYrcFm2rOdL
|
||||||
|
-----END RSA PRIVATE KEY-----
|
51
kernel_build/mkbootimg/gki/testdata/testkey_rsa4096.pem
vendored
Executable file
51
kernel_build/mkbootimg/gki/testdata/testkey_rsa4096.pem
vendored
Executable file
|
@ -0,0 +1,51 @@
|
||||||
|
-----BEGIN RSA PRIVATE KEY-----
|
||||||
|
MIIJKQIBAAKCAgEA2ASv49OEbH4NiT3CjNMSVeliyfEPXswWcqtEfCxlSpS1FisA
|
||||||
|
uwbvEwdTTPlkuSh6G4SYiNhnpCP5p0vcSg/3OhiuVKgV/rCtrDXaO60nvK/o0y83
|
||||||
|
NNZRK2xaJ9eWBq9ruIDK+jC0sYWzTaqqwxY0Grjnx/r5CXerl5PrRK7PILzwgBHb
|
||||||
|
IwxHcblt1ntgR4cWVpO3wiqasEwBDDDYk4fw7W6LvjBb9qav3YB8RV6PkZNeRP64
|
||||||
|
ggfuecq/MXNiWOPNxLzCER2hSr/+J32h9jWjXsrcVy8+8Mldhmr4r2an7c247aFf
|
||||||
|
upuFGtUJrpROO8/LXMl5gPfMpkqoatjTMRH59gJjKhot0RpmGxZBvb33TcBK5SdJ
|
||||||
|
X39Y4yct5clmDlI4Fjj7FutTP+b96aJeJVnYeUX/A0wmogBajsJRoRX5e/RcgZsY
|
||||||
|
RzXYLQXprQ81dBWjjovMJ9p8XeT6BNMFC7o6sklFL0fHDUE/l4BNP8G1u3Bfpzev
|
||||||
|
SCISRS71D4eS4oQB+RIPFBUkzomZ7rnEF3BwFeq+xmwfYrP0LRaH+1YeRauuMuRe
|
||||||
|
ke1TZl697a3mEjkNg8noa2wtpe7EWmaujJfXDWxJx/XEkjGLCe4z2qk3tkkY+A5g
|
||||||
|
Rcgzke8gVxC+eC2DJtbKYfkv4L8FMFJaEhwAp13MfC7FlYujO/BDLl7dANsCAwEA
|
||||||
|
AQKCAgAWoL8P/WsktjuSwb5sY/vKtgzcHH1Ar942GsysuTXPDy686LpF3R8T/jNy
|
||||||
|
n7k2UBAia8xSoWCR6BbRuHeV5oA+PLGeOpE7QaSfonB+yc+cy0x3Or3ssfqEsu/q
|
||||||
|
toGHp75/8DXS6WE0K04x94u1rdC9b9sPrrGBlWCLGzqM0kbuJfyHXdd3n2SofAUO
|
||||||
|
b5QRSgxD+2tHUpEroHqHnWJCaf4J0QegX45yktlfOYNK/PHLDQXV8ly/ejc32M4Y
|
||||||
|
Tv7hUtOOJTuq8VCg9OWZm2Zo1QuM9XEJTPCp5l3+o5vzO6yhk2gotDvD32CdA+3k
|
||||||
|
tLJRP54M1Sn+IXb1gGKN9rKAtGJbenWIPlNObhQgkbwG89Qd+5rfMXsiPv1Hl1tK
|
||||||
|
+tqwjD82/H3/ElaaMnwHCpeoGSp95OblAoBjzjMP2KsbvKSdL8O/rf1c3uOw9+DF
|
||||||
|
cth0SA8y3ZzI11gJtb2QMGUrCny5n4sPGGbc3x38NdLhwbkPKZy60OiT4g2kNpdY
|
||||||
|
dIitmAML2otttiF4AJM6AraPk8YVzkPLTksoL3azPBya5lIoDI2H3QvTtSvpXkXP
|
||||||
|
yKchsDSWYbdqfplqC/X0Djp2/Zd8jpN5I6+1aSmpTmbwx/JTllY1N89FRZLIdxoh
|
||||||
|
2k81LPiXhE6uRbjioJUlbnEWIpY2y2N2Clmxpjh0/IcXd1XImQKCAQEA7Zai+yjj
|
||||||
|
8xit24aO9Tf3mZBXBjSaDodjC2KS1yCcAIXp6S7aH0wZipyZpQjys3zaBQyMRYFG
|
||||||
|
bQqIfVAa6inWyDoofbAJHMu5BVcHFBPZvSS5YhDjc8XZ5dqSCxzIz9opIqAbm+b4
|
||||||
|
aEV/3A3Jki5Dy8y/5j21GAK4Y4mqQOYzne7bDGi3Hyu041MGM4qfIcIkS5N1eHW4
|
||||||
|
sDZJh6+K5tuxN5TX3nDZSpm9luNH8mLGgKAZ15b1LqXAtM5ycoBY9Hv082suPPom
|
||||||
|
O+r0ybdRX6nDSH8+11y2KiP2kdVIUHCGkwlqgrux5YZyjCZPwOvEPhzSoOS+vBiF
|
||||||
|
UVXA8idnxNLk1QKCAQEA6MIihDSXx+350fWqhQ/3Qc6gA/t2C15JwJ9+uFWA+gjd
|
||||||
|
c/hn5HcmnmBJN4R04nLG/aU9SQur87a4mnC/Mp9JIARjHlZ/WNT4U0sJyPEVRg5U
|
||||||
|
Z9VajAucWwi0JyJYCO1EMMy68Jp8qlTriK/L7nbD86JJ5ASxjojiN/0psK/Pk60F
|
||||||
|
Rr+shKPi3jRQ1BDjDtAxOfo4ctf/nFbUM4bY0FNPQMP7WesoSKU0NBCRR6d0d2tq
|
||||||
|
YflMjIQHx+N74P5jEdSCHTVGQm+dj47pUt3lLPLWc0bX1G/GekwXP4NUsR/70Hsi
|
||||||
|
bwxkNnK2TSGzkt2rcOnutP125rJu6WpV7SNrq9rm7wKCAQAfMROcnbWviKHqnDPQ
|
||||||
|
hdR/2K9UJTvEhInASOS2UZWpi+s1rez9BuSjigOx4wbaAZ4t44PW7C3uyt84dHfU
|
||||||
|
HkIQb3I5bg8ENMrJpK9NN33ykwuzkDwMSwFcZ+Gci97hSubzoMl/IkeiiN1MapL4
|
||||||
|
GhLUgsD+3UMVL+Y9SymK8637IgyoCGdiND6/SXsa8SwLJo3VTjqx4eKpX7cvlSBL
|
||||||
|
RrRxc50TmwUsAhsd4CDl9YnSATLjVvJBeYlfM2tbFPaYwl1aR8v+PWkfnK0efm60
|
||||||
|
fHki33HEnGteBPKuGq4vwVYpn6bYGwQz+f6335/A2DMfZHFSpjVURHPcRcHbCMla
|
||||||
|
0cUxAoIBAQC25eYNkO478mo+bBbEXJlkoqLmvjAyGrNFo48F9lpVH6Y0vNuWkXJN
|
||||||
|
PUgLUhAu6RYotjGENqG17rz8zt/PPY9Ok2P3sOx8t00y1mIn/hlDZXs55FM0fOMu
|
||||||
|
PZaiscAPs7HDzvyOmDah+fzi+ZD8H2M3DS2W+YE0iaeJa2vZJS2t02W0BGXiDI33
|
||||||
|
IZDqMyLYvwwPjOnShJydEzXID4xLl0tNjzLxo3GSNA7jYqlmbtV8CXIc7rMSL6WV
|
||||||
|
ktIDKKJcnmpn3TcKeX6MEjaSIT82pNOS3fY3PmXuL+CMzfw8+u77Eecq78fHaTiL
|
||||||
|
P5JGM93F6mzi19EY0tmInUBMCWtQLcENAoIBAQCg0KaOkb8T36qzPrtgbfou0E2D
|
||||||
|
ufdpL1ugmD4edOFKQB5fDFQhLnSEVSJq3KUg4kWsXapQdsBd6kLdxS+K6MQrLBzr
|
||||||
|
4tf0c7UCF1AzWk6wXMExZ8mRb2RkGZYQB2DdyhFB3TPmnq9CW8JCq+6kxg/wkU4s
|
||||||
|
vM4JXzgcqVoSf42QJl+B9waeWhg0BTWx01lal4ds88HvEKmE0ik5GwiDbr7EvDDw
|
||||||
|
E6UbZtQcIoSTIIZDgYqVFfR2DAho3wXJRsOXh433lEJ8X7cCDzrngFbQnlKrpwML
|
||||||
|
Xgm0SIUc+Nf5poMM3rfLFK77t/ob4w+5PwRKcoSniyAxrHd6bwykYA8Vuydv
|
||||||
|
-----END RSA PRIVATE KEY-----
|
666
kernel_build/mkbootimg/mkbootimg.py
Executable file
666
kernel_build/mkbootimg/mkbootimg.py
Executable file
|
@ -0,0 +1,666 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright 2015, The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Creates the boot image."""
|
||||||
|
|
||||||
|
from argparse import (ArgumentParser, ArgumentTypeError,
|
||||||
|
FileType, RawDescriptionHelpFormatter)
|
||||||
|
from hashlib import sha1
|
||||||
|
from os import fstat
|
||||||
|
from struct import pack
|
||||||
|
|
||||||
|
import array
|
||||||
|
import collections
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
from gki.generate_gki_certificate import generate_gki_certificate
|
||||||
|
|
||||||
|
# Constant and structure definition is in
# system/tools/mkbootimg/include/bootimg/bootimg.h

# boot_img_hdr field widths (bytes) and per-version header sizes.
BOOT_MAGIC = 'ANDROID!'
BOOT_MAGIC_SIZE = 8
BOOT_NAME_SIZE = 16
BOOT_ARGS_SIZE = 512
BOOT_EXTRA_ARGS_SIZE = 1024
BOOT_IMAGE_HEADER_V1_SIZE = 1648
BOOT_IMAGE_HEADER_V2_SIZE = 1660
BOOT_IMAGE_HEADER_V3_SIZE = 1580
# v3+ headers use a fixed 4 KiB page size instead of a --pagesize field.
BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
BOOT_IMAGE_HEADER_V4_SIZE = 1584
BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096

# vendor_boot_img_hdr field widths and per-version header sizes.
VENDOR_BOOT_MAGIC = 'VNDRBOOT'
VENDOR_BOOT_MAGIC_SIZE = 8
VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE
VENDOR_BOOT_ARGS_SIZE = 2048
VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112
VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128

# vendor_ramdisk_table_entry ramdisk_type values and entry layout sizes.
VENDOR_RAMDISK_TYPE_NONE = 0
VENDOR_RAMDISK_TYPE_PLATFORM = 1
VENDOR_RAMDISK_TYPE_RECOVERY = 2
VENDOR_RAMDISK_TYPE_DLKM = 3
VENDOR_RAMDISK_NAME_SIZE = 32
VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108

# Names with special meaning, mustn't be specified in --ramdisk_name.
VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'}

PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment'
|
||||||
|
|
||||||
|
|
||||||
|
def filesize(f):
    """Return the size in bytes of file object |f|, or 0 when unavailable.

    Treats None as an absent file; a failing fstat() (e.g. a closed or
    non-file descriptor) also yields 0 rather than raising.
    """
    try:
        return 0 if f is None else fstat(f.fileno()).st_size
    except OSError:
        return 0
|
||||||
|
|
||||||
|
|
||||||
|
def update_sha(sha, f):
    """Fold |f|'s contents and size into |sha| (legacy boot image id).

    A falsy |f| contributes only a zero 32-bit length word.  Otherwise
    the full content is hashed, the file is rewound so callers can
    re-read it, and the 32-bit little-endian size is hashed after it.
    """
    if not f:
        sha.update(pack('I', 0))
        return
    sha.update(f.read())
    f.seek(0)
    sha.update(pack('I', filesize(f)))
|
||||||
|
|
||||||
|
|
||||||
|
def pad_file(f, padding):
    """Write zero bytes so |f|'s position becomes a multiple of |padding|.

    |padding| must be a power of two; the mask arithmetic computes the
    distance to the next boundary (0 when already aligned).
    """
    remainder = f.tell() & (padding - 1)
    gap = (padding - remainder) & (padding - 1)
    f.write(pack(f'{gap}x'))
|
||||||
|
|
||||||
|
|
||||||
|
def get_number_of_pages(image_size, page_size):
    """Return how many |page_size|-byte pages hold |image_size| bytes.

    Ceiling division: a partial final page still counts as one page;
    zero bytes need zero pages.
    """
    return -(-image_size // page_size)
|
||||||
|
|
||||||
|
|
||||||
|
def get_recovery_dtbo_offset(args):
    """Calculate the byte offset of the recovery_dtbo image in the boot image.

    The recovery dtbo follows the header (exactly one page), kernel,
    ramdisk and second-stage sections, each rounded up to whole pages.
    """
    pages = 1  # the boot image header occupies one full page
    for section in (args.kernel, args.ramdisk, args.second):
        pages += get_number_of_pages(filesize(section), args.pagesize)
    return args.pagesize * pages
|
||||||
|
|
||||||
|
|
||||||
|
def should_add_legacy_gki_boot_signature(args):
    """Whether a legacy GKI v4 boot signature section should be emitted.

    True only when both a signing key and a signing algorithm were
    supplied on the command line.
    """
    return bool(args.gki_signing_key and args.gki_signing_algorithm)
|
||||||
|
|
||||||
|
|
||||||
|
def write_header_v3_and_above(args):
    """Write a v3/v4 boot image header to args.output.

    v3+ headers drop the per-section load addresses and the second /
    recovery-dtbo images of earlier versions and use a fixed 4 KiB page
    size.  v4 appends one extra field: the boot signature size.  The
    header is zero-padded to a full page afterwards.
    """
    # v4's header is 4 bytes larger than v3's (the signature size field).
    if args.header_version > 3:
        boot_header_size = BOOT_IMAGE_HEADER_V4_SIZE
    else:
        boot_header_size = BOOT_IMAGE_HEADER_V3_SIZE

    args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
    # kernel size in bytes
    args.output.write(pack('I', filesize(args.kernel)))
    # ramdisk size in bytes
    args.output.write(pack('I', filesize(args.ramdisk)))
    # os version and patch level (os_version in the upper 21 bits,
    # os_patch_level in the lower 11 bits)
    args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
    args.output.write(pack('I', boot_header_size))
    # reserved
    args.output.write(pack('4I', 0, 0, 0, 0))
    # version of boot image header
    args.output.write(pack('I', args.header_version))
    # v3+ concatenates cmdline and extra_cmdline into one buffer.
    args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s',
                           args.cmdline))
    if args.header_version >= 4:
        # The signature used to verify boot image v4.
        boot_signature_size = 0
        if should_add_legacy_gki_boot_signature(args):
            boot_signature_size = BOOT_IMAGE_V4_SIGNATURE_SIZE
        args.output.write(pack('I', boot_signature_size))
    pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE)
|
||||||
|
|
||||||
|
|
||||||
|
def write_vendor_boot_header(args):
    """Write a v3/v4 vendor boot image header to args.vendor_boot.

    v4 extends v3 with the vendor ramdisk table and bootconfig fields;
    for v4 the recorded ramdisk size is the total over all fragments.
    The header is zero-padded to a full page afterwards.
    """
    if args.header_version > 3:
        # v4: multiple ramdisk fragments, tracked by a table.
        vendor_ramdisk_size = args.vendor_ramdisk_total_size
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE
    else:
        # v3: a single vendor ramdisk file.
        vendor_ramdisk_size = filesize(args.vendor_ramdisk)
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE

    args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s',
                                VENDOR_BOOT_MAGIC.encode()))
    # version of boot image header
    args.vendor_boot.write(pack('I', args.header_version))
    # flash page size
    args.vendor_boot.write(pack('I', args.pagesize))
    # kernel physical load address
    args.vendor_boot.write(pack('I', args.base + args.kernel_offset))
    # ramdisk physical load address
    args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset))
    # ramdisk size in bytes
    args.vendor_boot.write(pack('I', vendor_ramdisk_size))
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s',
                                args.vendor_cmdline))
    # kernel tags physical load address
    args.vendor_boot.write(pack('I', args.base + args.tags_offset))
    # asciiz product name
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board))

    # header size in bytes
    args.vendor_boot.write(pack('I', vendor_boot_header_size))

    # dtb size in bytes
    args.vendor_boot.write(pack('I', filesize(args.dtb)))
    # dtb physical load address
    args.vendor_boot.write(pack('Q', args.base + args.dtb_offset))

    if args.header_version > 3:
        vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num *
                                     VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)
        # vendor ramdisk table size in bytes
        args.vendor_boot.write(pack('I', vendor_ramdisk_table_size))
        # number of vendor ramdisk table entries
        args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num))
        # vendor ramdisk table entry size in bytes
        args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE))
        # bootconfig section size in bytes
        args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig)))
    pad_file(args.vendor_boot, args.pagesize)
|
||||||
|
|
||||||
|
|
||||||
|
def write_header(args):
    """Write the boot image header to args.output and return the image id.

    Dispatches to write_header_v3_and_above() for versions 3 and 4;
    versions > 4 are rejected.  For versions 0-2 this emits the legacy
    header layout, including a SHA-1-based image id over the kernel,
    ramdisk, second (and, per version, recovery_dtbo / dtb) sections.

    Returns:
        The packed 32-byte image id (only for header versions <= 2;
        the v3+ path returns write_header_v3_and_above()'s result).

    Raises:
        ValueError: on an unsupported header version, or when header
            version > 1 and the DTB image is empty.
    """
    if args.header_version > 4:
        raise ValueError(
            f'Boot header version {args.header_version} not supported')
    if args.header_version in {3, 4}:
        return write_header_v3_and_above(args)

    # Empty sections get a zero load address rather than base + offset.
    ramdisk_load_address = ((args.base + args.ramdisk_offset)
                            if filesize(args.ramdisk) > 0 else 0)
    second_load_address = ((args.base + args.second_offset)
                           if filesize(args.second) > 0 else 0)

    args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
    # kernel size in bytes
    args.output.write(pack('I', filesize(args.kernel)))
    # kernel physical load address
    args.output.write(pack('I', args.base + args.kernel_offset))
    # ramdisk size in bytes
    args.output.write(pack('I', filesize(args.ramdisk)))
    # ramdisk physical load address
    args.output.write(pack('I', ramdisk_load_address))
    # second bootloader size in bytes
    args.output.write(pack('I', filesize(args.second)))
    # second bootloader physical load address
    args.output.write(pack('I', second_load_address))
    # kernel tags physical load address
    args.output.write(pack('I', args.base + args.tags_offset))
    # flash page size
    args.output.write(pack('I', args.pagesize))
    # version of boot image header
    args.output.write(pack('I', args.header_version))
    # os version and patch level (os_version in the upper 21 bits,
    # os_patch_level in the lower 11 bits)
    args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
    # asciiz product name
    args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board))
    args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline))

    # Legacy image id: SHA-1 over each section's content followed by its
    # 32-bit size (update_sha hashes a zero size for missing sections).
    sha = sha1()
    update_sha(sha, args.kernel)
    update_sha(sha, args.ramdisk)
    update_sha(sha, args.second)

    if args.header_version > 0:
        update_sha(sha, args.recovery_dtbo)
    if args.header_version > 1:
        update_sha(sha, args.dtb)

    img_id = pack('32s', sha.digest())

    args.output.write(img_id)
    args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline))

    if args.header_version > 0:
        if args.recovery_dtbo:
            # recovery dtbo size in bytes
            args.output.write(pack('I', filesize(args.recovery_dtbo)))
            # recovery dtbo offset in the boot image
            args.output.write(pack('Q', get_recovery_dtbo_offset(args)))
        else:
            # Set to zero if no recovery dtbo
            args.output.write(pack('I', 0))
            args.output.write(pack('Q', 0))

    # Populate boot image header size for header versions 1 and 2.
    if args.header_version == 1:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
    elif args.header_version == 2:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))

    if args.header_version > 1:
        if filesize(args.dtb) == 0:
            raise ValueError('DTB image must not be empty.')

        # dtb size in bytes
        args.output.write(pack('I', filesize(args.dtb)))
        # dtb physical load address
        args.output.write(pack('Q', args.base + args.dtb_offset))

    pad_file(args.output, args.pagesize)
    return img_id
|
||||||
|
|
||||||
|
|
||||||
|
class AsciizBytes:
    """Argparse type that encodes a string as a NUL-terminated bytes object.

    The encoded result (including the trailing NUL) must fit in
    ``bufsize`` bytes.

    >>> AsciizBytes(bufsize=4)('foo')
    b'foo\\x00'
    >>> AsciizBytes(bufsize=4)('foob')
    Traceback (most recent call last):
        ...
    argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5
    """

    def __init__(self, bufsize):
        # Maximum encoded length, counting the trailing NUL byte.
        self.bufsize = bufsize

    def __call__(self, arg):
        encoded = arg.encode() + b'\x00'
        if len(encoded) > self.bufsize:
            raise ArgumentTypeError(
                'Encoded asciiz length exceeded: '
                f'max {self.bufsize}, got {len(encoded)}')
        return encoded
|
||||||
|
|
||||||
|
|
||||||
|
class VendorRamdiskTableBuilder:
    """Vendor ramdisk table builder.

    Attributes:
        entries: A list of VendorRamdiskTableEntry namedtuple.
        ramdisk_total_size: Total size in bytes of all ramdisks in the table.
    """

    VendorRamdiskTableEntry = collections.namedtuple(  # pylint: disable=invalid-name
        'VendorRamdiskTableEntry',
        ['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type',
         'ramdisk_name', 'board_id'])

    def __init__(self):
        self.entries = []
        self.ramdisk_total_size = 0
        # Names seen so far, used to reject duplicates in add_entry().
        self.ramdisk_names = set()

    def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id):
        """Append one ramdisk fragment; offsets are cumulative sizes.

        Raises:
            ValueError: on a blocklisted or duplicated name, or a
                board_id of the wrong length.
        """
        # Strip any trailing null for simple comparison.
        stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00')
        if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST:
            raise ValueError(
                f'Banned vendor ramdisk name: {stripped_ramdisk_name}')
        if stripped_ramdisk_name in self.ramdisk_names:
            raise ValueError(
                f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}')
        self.ramdisk_names.add(stripped_ramdisk_name)

        # Missing board_id defaults to all zeros; a supplied one must be
        # exactly VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE 32-bit words.
        if board_id is None:
            board_id = array.array(
                'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)
        else:
            board_id = array.array('I', board_id)
        if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE:
            raise ValueError('board_id size must be '
                             f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}')

        with open(ramdisk_path, 'rb') as f:
            ramdisk_size = filesize(f)
        # ramdisk_offset is the running total BEFORE this fragment, i.e.
        # fragments are laid out back-to-back without per-entry padding.
        self.entries.append(self.VendorRamdiskTableEntry(
            ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type,
            ramdisk_name, board_id))
        self.ramdisk_total_size += ramdisk_size

    def write_ramdisks_padded(self, fout, alignment):
        """Concatenate all fragments into fout, then pad to alignment."""
        for entry in self.entries:
            with open(entry.ramdisk_path, 'rb') as f:
                fout.write(f.read())
        pad_file(fout, alignment)

    def write_entries_padded(self, fout, alignment):
        """Write the binary table entries into fout, then pad to alignment."""
        for entry in self.entries:
            fout.write(pack('I', entry.ramdisk_size))
            fout.write(pack('I', entry.ramdisk_offset))
            fout.write(pack('I', entry.ramdisk_type))
            fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s',
                            entry.ramdisk_name))
            fout.write(entry.board_id)
        pad_file(fout, alignment)
|
||||||
|
|
||||||
|
|
||||||
|
def write_padded_file(f_out, f_in, padding):
    """Copies the contents of f_in into f_out, then pads f_out.

    Does nothing when f_in is None.
    """
    if f_in is None:
        return
    contents = f_in.read()
    f_out.write(contents)
    pad_file(f_out, padding)
|
|
||||||
|
|
||||||
|
def parse_int(x):
    """Converts a numeric string to int, honoring 0x/0o/0b prefixes."""
    return int(x, base=0)
||||||
|
|
||||||
|
|
||||||
|
def parse_os_version(x):
    """Packs an 'A[.B[.C]]' OS version string into its 21-bit encoded form.

    Returns 0 when the string does not start with a dotted version number.
    """
    match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x)
    if not match:
        return 0
    fields = [int(g) if g is not None else 0 for g in match.groups()]
    major, minor, patch = fields
    # 7 bits allocated for each field
    assert major < 128
    assert minor < 128
    assert patch < 128
    return (major << 14) | (minor << 7) | patch
||||||
|
|
||||||
|
|
||||||
|
def parse_os_patch_level(x):
    """Packs a 'YYYY-MM[-DD]' patch level string into its 11-bit encoded form.

    Returns 0 when the string does not start with a YYYY-MM date.
    """
    match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x)
    if not match:
        return 0
    year = int(match.group(1)) - 2000
    month = int(match.group(2))
    # 7 bits allocated for the year, 4 bits for the month
    assert 0 <= year < 128
    assert 0 < month <= 12
    return (year << 4) | month
||||||
|
|
||||||
|
|
||||||
|
def parse_vendor_ramdisk_type(x):
    """Maps a ramdisk type name (or a numeric string) to its numeric value."""
    named_types = {
        'none': VENDOR_RAMDISK_TYPE_NONE,
        'platform': VENDOR_RAMDISK_TYPE_PLATFORM,
        'recovery': VENDOR_RAMDISK_TYPE_RECOVERY,
        'dlkm': VENDOR_RAMDISK_TYPE_DLKM,
    }
    value = named_types.get(x.lower())
    if value is not None:
        return value
    # Fall back to treating the argument as an integer literal.
    return parse_int(x)
||||||
|
|
||||||
|
|
||||||
|
def get_vendor_boot_v4_usage():
    """Returns the usage epilog describing vendor boot v4 arguments."""
    return """vendor boot version 4 arguments:
  --ramdisk_type {none,platform,recovery,dlkm}
                        specify the type of the ramdisk
  --ramdisk_name NAME
                        specify the name of the ramdisk
  --board_id{0..15} NUMBER
                        specify the value of the board_id vector, defaults to 0
  --vendor_ramdisk_fragment VENDOR_RAMDISK_FILE
                        path to the vendor ramdisk file

These options can be specified multiple times, where each vendor ramdisk
option group ends with a --vendor_ramdisk_fragment option.
Each option group appends an additional ramdisk to the vendor boot image.
"""
||||||
|
|
||||||
|
|
||||||
|
def parse_vendor_ramdisk_args(args, args_list):
    """Parses vendor ramdisk specific arguments.

    Args:
        args: An argparse.Namespace object. Parsed results are stored into this
            object.
        args_list: A list of argument strings to be parsed.

    Returns:
        A list argument strings that are not parsed by this method.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type,
                        default=VENDOR_RAMDISK_TYPE_NONE)
    parser.add_argument('--ramdisk_name',
                        type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE),
                        required=True)
    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE):
        parser.add_argument(f'--board_id{i}', type=parse_int, default=0)
    parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True)

    unknown_args = []

    vendor_ramdisk_table_builder = VendorRamdiskTableBuilder()
    # --vendor_ramdisk (if given) becomes the first, unnamed platform fragment.
    if args.vendor_ramdisk is not None:
        vendor_ramdisk_table_builder.add_entry(
            args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None)

    while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list:
        # +2 skips past the flag and its value so the slice holds one complete
        # option group ending with --vendor_ramdisk_fragment <file>.
        idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2
        vendor_ramdisk_args = args_list[:idx]
        args_list = args_list[idx:]

        ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args)
        ramdisk_args_dict = vars(ramdisk_args)
        unknown_args.extend(extra_args)

        ramdisk_path = ramdisk_args.vendor_ramdisk_fragment
        ramdisk_type = ramdisk_args.ramdisk_type
        ramdisk_name = ramdisk_args.ramdisk_name
        board_id = [ramdisk_args_dict[f'board_id{i}']
                    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)]
        vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type,
                                               ramdisk_name, board_id)

    if len(args_list) > 0:
        unknown_args.extend(args_list)

    args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder
                                      .ramdisk_total_size)
    args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder
                                              .entries)
    args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder
    return unknown_args
||||||
|
|
||||||
|
|
||||||
|
def parse_cmdline():
    """Parses the command line and returns an argparse.Namespace.

    A pre-pass parser reads only --header_version first, because the
    permitted --cmdline buffer size depends on the header version.
    """
    version_parser = ArgumentParser(add_help=False)
    version_parser.add_argument('--header_version', type=parse_int, default=0)
    if version_parser.parse_known_args()[0].header_version < 3:
        # For boot header v0 to v2, the kernel commandline field is split into
        # two fields, cmdline and extra_cmdline. Both fields are asciiz strings,
        # so we minus one here to ensure the encoded string plus the
        # null-terminator can fit in the buffer size.
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1
    else:
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE

    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            epilog=get_vendor_boot_v4_usage())
    parser.add_argument('--kernel', type=FileType('rb'),
                        help='path to the kernel')
    parser.add_argument('--ramdisk', type=FileType('rb'),
                        help='path to the ramdisk')
    parser.add_argument('--second', type=FileType('rb'),
                        help='path to the second bootloader')
    parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb')
    dtbo_group = parser.add_mutually_exclusive_group()
    dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'),
                            help='path to the recovery DTBO')
    dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'),
                            metavar='RECOVERY_ACPIO', dest='recovery_dtbo',
                            help='path to the recovery ACPIO')
    parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size),
                        default='', help='kernel command line arguments')
    parser.add_argument('--vendor_cmdline',
                        type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE),
                        default='',
                        help='vendor boot kernel command line arguments')
    parser.add_argument('--base', type=parse_int, default=0x10000000,
                        help='base address')
    parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000,
                        help='kernel offset')
    parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000,
                        help='ramdisk offset')
    parser.add_argument('--second_offset', type=parse_int, default=0x00f00000,
                        help='second bootloader offset')
    parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000,
                        help='dtb offset')

    parser.add_argument('--os_version', type=parse_os_version, default=0,
                        help='operating system version')
    parser.add_argument('--os_patch_level', type=parse_os_patch_level,
                        default=0, help='operating system patch level')
    parser.add_argument('--tags_offset', type=parse_int, default=0x00000100,
                        help='tags offset')
    parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE),
                        default='', help='board name')
    parser.add_argument('--pagesize', type=parse_int,
                        choices=[2**i for i in range(11, 15)], default=2048,
                        help='page size')
    parser.add_argument('--id', action='store_true',
                        help='print the image ID on standard output')
    parser.add_argument('--header_version', type=parse_int, default=0,
                        help='boot image header version')
    parser.add_argument('-o', '--output', type=FileType('wb'),
                        help='output file name')
    parser.add_argument('--vendor_boot', type=FileType('wb'),
                        help='vendor boot output file name')
    parser.add_argument('--vendor_ramdisk', type=FileType('rb'),
                        help='path to the vendor ramdisk')
    parser.add_argument('--vendor_bootconfig', type=FileType('rb'),
                        help='path to the vendor bootconfig file')

    gki_2_0_signing_args = parser.add_argument_group(
        '[DEPRECATED] GKI 2.0 signing arguments')
    gki_2_0_signing_args.add_argument(
        '--gki_signing_algorithm', help='GKI signing algorithm to use')
    gki_2_0_signing_args.add_argument(
        '--gki_signing_key', help='path to RSA private key file')
    gki_2_0_signing_args.add_argument(
        '--gki_signing_signature_args', default='',
        help='other hash arguments passed to avbtool')
    gki_2_0_signing_args.add_argument(
        '--gki_signing_avbtool_path', default='avbtool',
        help='path to avbtool for boot signature generation')

    args, extra_args = parser.parse_known_args()
    # Vendor boot v4 ramdisk-fragment option groups live in the leftovers.
    if args.vendor_boot is not None and args.header_version > 3:
        extra_args = parse_vendor_ramdisk_args(args, extra_args)
    if len(extra_args) > 0:
        raise ValueError(f'Unrecognized arguments: {extra_args}')

    if args.header_version < 3:
        # Split the cmdline into the fixed-size asciiz cmdline field and the
        # overflow extra_cmdline field.
        args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:]
        args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00'
        assert len(args.cmdline) <= BOOT_ARGS_SIZE
        assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE

    return args
||||||
|
|
||||||
|
|
||||||
|
def add_boot_image_signature(args, pagesize):
    """Adds the boot image signature.

    Note that the signature will only be verified in VTS to ensure a
    generic boot.img is used. It will not be used by the device
    bootloader at boot time. The bootloader should only verify
    the boot vbmeta at the end of the boot partition (or in the top-level
    vbmeta partition) via the Android Verified Boot process, when the
    device boots.

    Raises:
        ValueError: if the generated signature exceeds the fixed
            BOOT_IMAGE_V4_SIGNATURE_SIZE field.
    """
    # Flush the buffer for signature calculation.
    args.output.flush()

    # Outputs the signed vbmeta to a separate file, then append to boot.img
    # as the boot signature.
    with tempfile.TemporaryDirectory() as temp_out_dir:
        boot_signature_output = os.path.join(temp_out_dir, 'boot_signature')
        generate_gki_certificate(
            image=args.output.name, avbtool=args.gki_signing_avbtool_path,
            name='boot', algorithm=args.gki_signing_algorithm,
            key=args.gki_signing_key, salt='d00df00d',
            additional_avb_args=args.gki_signing_signature_args.split(),
            output=boot_signature_output,
        )
        with open(boot_signature_output, 'rb') as boot_signature:
            boot_signature_bytes = boot_signature.read()
        if len(boot_signature_bytes) > BOOT_IMAGE_V4_SIGNATURE_SIZE:
            # Fixed typo in the user-facing message ('sigature' -> 'signature').
            raise ValueError(
                f'boot signature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}')
        # Zero-pad the signature up to the fixed field size.
        boot_signature_bytes += b'\x00' * (
            BOOT_IMAGE_V4_SIGNATURE_SIZE - len(boot_signature_bytes))
        assert len(boot_signature_bytes) == BOOT_IMAGE_V4_SIGNATURE_SIZE
        args.output.write(boot_signature_bytes)
        pad_file(args.output, pagesize)
||||||
|
|
||||||
|
|
||||||
|
def write_data(args, pagesize):
    """Writes the page-padded payload sections that follow the boot header."""
    write_padded_file(args.output, args.kernel, pagesize)
    write_padded_file(args.output, args.ramdisk, pagesize)
    write_padded_file(args.output, args.second, pagesize)

    # recovery_dtbo exists in headers v1/v2 only; dtb in v2 only.
    if args.header_version > 0 and args.header_version < 3:
        write_padded_file(args.output, args.recovery_dtbo, pagesize)
    if args.header_version == 2:
        write_padded_file(args.output, args.dtb, pagesize)
    if args.header_version >= 4 and should_add_legacy_gki_boot_signature(args):
        add_boot_image_signature(args, pagesize)
||||||
|
|
||||||
|
|
||||||
|
def write_vendor_boot_data(args):
    """Writes the vendor_boot payload: ramdisk(s), dtb, table, bootconfig."""
    if args.header_version > 3:
        # v4: concatenated ramdisk fragments followed by the table that
        # describes them, plus an optional bootconfig section.
        builder = args.vendor_ramdisk_table_builder
        builder.write_ramdisks_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
        builder.write_entries_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.vendor_bootconfig,
                          args.pagesize)
    else:
        # v3: a single vendor ramdisk followed by the dtb.
        write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Validates arguments and writes the boot and/or vendor boot images."""
    args = parse_cmdline()
    if args.vendor_boot is not None:
        # vendor_boot only exists for header versions 3 and 4.
        if args.header_version not in {3, 4}:
            raise ValueError(
                '--vendor_boot not compatible with given header version')
        if args.header_version == 3 and args.vendor_ramdisk is None:
            raise ValueError('--vendor_ramdisk missing or invalid')
        write_vendor_boot_header(args)
        write_vendor_boot_data(args)
    if args.output is not None:
        if args.second is not None and args.header_version > 2:
            raise ValueError(
                '--second not compatible with given header version')
        img_id = write_header(args)
        if args.header_version > 2:
            # v3+ always uses the fixed 4096-byte page size.
            write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
        else:
            write_data(args, args.pagesize)
        if args.id and img_id is not None:
            print('0x' + ''.join(f'{octet:02x}' for octet in img_id))
||||||
|
|
||||||
|
|
||||||
|
# Script entry point; importing this module does not trigger a build.
if __name__ == '__main__':
    main()
|
394
kernel_build/mkbootimg/repack_bootimg.py
Executable file
394
kernel_build/mkbootimg/repack_bootimg.py
Executable file
|
@ -0,0 +1,394 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright 2021, The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Repacks the boot image.
|
||||||
|
|
||||||
|
Unpacks the boot image and the ramdisk inside, then add files into
|
||||||
|
the ramdisk to repack the boot image.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import datetime
|
||||||
|
import enum
|
||||||
|
import glob
|
||||||
|
import os
|
||||||
|
import shlex
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
|
||||||
|
class TempFileManager:
    """Tracks temporary files and dirs, removing them when garbage-collected."""

    def __init__(self):
        self._temp_files = []

    def __del__(self):
        """Removes every tracked temp path; dirs recursively, files directly."""
        for path in self._temp_files:
            if os.path.isdir(path):
                shutil.rmtree(path, ignore_errors=True)
            else:
                os.remove(path)

    def make_temp_dir(self, prefix='tmp', suffix=''):
        """Creates a tracked temporary directory.

        Returns:
            The absolute pathname of the new directory.
        """
        new_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
        self._temp_files.append(new_dir)
        return new_dir

    def make_temp_file(self, prefix='tmp', suffix=''):
        """Creates a tracked temporary file (closed immediately after creation).

        Returns:
            The absolute pathname of the new file.
        """
        fd, new_file = tempfile.mkstemp(prefix=prefix, suffix=suffix)
        os.close(fd)
        self._temp_files.append(new_file)
        return new_file
||||||
|
|
||||||
|
|
||||||
|
class RamdiskFormat(enum.Enum):
    """Enum class for different ramdisk compression formats."""
    LZ4 = 1    # handled via the external 'lz4' utility
    GZIP = 2   # handled via the external 'gzip' utility
||||||
|
|
||||||
|
|
||||||
|
class BootImageType(enum.Enum):
    """Enum class for different boot image types."""
    BOOT_IMAGE = 1                  # unpack produced a 'ramdisk' file
    VENDOR_BOOT_IMAGE = 2           # unpack produced a 'vendor_ramdisk' file
    SINGLE_RAMDISK_FRAGMENT = 3     # exactly one 'vendor_ramdisk*' fragment
    MULTIPLE_RAMDISK_FRAGMENTS = 4  # several 'vendor_ramdisk*' fragments
||||||
|
|
||||||
|
|
||||||
|
class RamdiskImage:
    """A class that supports packing/unpacking a ramdisk."""
    def __init__(self, ramdisk_img, unpack=True):
        # ramdisk_img may be None when unpack=False (an empty ramdisk that
        # files are added to later — see BootImage._unpack_bootimg).
        self._ramdisk_img = ramdisk_img
        self._ramdisk_format = None
        self._ramdisk_dir = None
        self._temp_file_manager = TempFileManager()

        if unpack:
            self._unpack_ramdisk()
        else:
            self._ramdisk_dir = self._temp_file_manager.make_temp_dir(
                suffix='_new_ramdisk')

    def _unpack_ramdisk(self):
        """Unpacks the ramdisk.

        Raises:
            RuntimeError: if neither lz4 nor gzip can decompress the image.
        """
        self._ramdisk_dir = self._temp_file_manager.make_temp_dir(
            suffix='_' + os.path.basename(self._ramdisk_img))

        # The compression format might be in 'lz4' or 'gzip' format,
        # trying lz4 first.
        for compression_type, compression_util in [
            (RamdiskFormat.LZ4, 'lz4'),
            (RamdiskFormat.GZIP, 'gzip')]:

            # Command arguments:
            #   -d: decompression
            #   -c: write to stdout
            decompression_cmd = [
                compression_util, '-d', '-c', self._ramdisk_img]

            decompressed_result = subprocess.run(
                decompression_cmd, check=False, capture_output=True)

            if decompressed_result.returncode == 0:
                self._ramdisk_format = compression_type
                break

        if self._ramdisk_format is not None:
            # toybox cpio arguments:
            #   -i: extract files from stdin
            #   -d: create directories if needed
            #   -u: override existing files
            subprocess.run(
                ['toybox', 'cpio', '-idu'], check=True,
                input=decompressed_result.stdout, cwd=self._ramdisk_dir)

            print(f"=== Unpacked ramdisk: '{self._ramdisk_img}' at "
                  f"'{self._ramdisk_dir}' ===")
        else:
            raise RuntimeError('Failed to decompress ramdisk.')

    def repack_ramdisk(self, out_ramdisk_file):
        """Repacks a ramdisk from self._ramdisk_dir.

        Args:
            out_ramdisk_file: the output ramdisk file to save.
        """
        compression_cmd = ['lz4', '-l', '-12', '--favor-decSpeed']
        if self._ramdisk_format == RamdiskFormat.GZIP:
            compression_cmd = ['gzip']

        print('Repacking ramdisk, which might take a few seconds ...')

        mkbootfs_result = subprocess.run(
            ['mkbootfs', self._ramdisk_dir], check=True, capture_output=True)

        # NOTE(review): opened in text mode, but the compressor child process
        # writes to the fd directly, so no text-layer translation occurs.
        with open(out_ramdisk_file, 'w') as output_fd:
            subprocess.run(compression_cmd, check=True,
                           input=mkbootfs_result.stdout, stdout=output_fd)

        print("=== Repacked ramdisk: '{}' ===".format(out_ramdisk_file))

    @property
    def ramdisk_dir(self):
        """Returns the internal ramdisk dir."""
        return self._ramdisk_dir
||||||
|
|
||||||
|
|
||||||
|
class BootImage:
    """A class that supports packing/unpacking a boot.img and ramdisk."""

    def __init__(self, bootimg):
        self._bootimg = bootimg
        self._bootimg_dir = None
        self._bootimg_type = None
        self._ramdisk = None
        # mkbootimg arguments reported by unpack_bootimg; replayed on repack.
        self._previous_mkbootimg_args = []
        self._temp_file_manager = TempFileManager()

        self._unpack_bootimg()

    def _get_vendor_ramdisks(self):
        """Returns a list of vendor ramdisks after unpack."""
        return sorted(glob.glob(
            os.path.join(self._bootimg_dir, 'vendor_ramdisk*')))

    def _unpack_bootimg(self):
        """Unpacks the boot.img and the ramdisk inside.

        Raises:
            RuntimeError: if no ramdisk of any known layout is found.
        """
        self._bootimg_dir = self._temp_file_manager.make_temp_dir(
            suffix='_' + os.path.basename(self._bootimg))

        # Unpacks the boot.img first.
        unpack_bootimg_cmds = [
            'unpack_bootimg',
            '--boot_img', self._bootimg,
            '--out', self._bootimg_dir,
            '--format=mkbootimg',
        ]
        result = subprocess.run(unpack_bootimg_cmds, check=True,
                                capture_output=True, encoding='utf-8')
        self._previous_mkbootimg_args = shlex.split(result.stdout)
        print("=== Unpacked boot image: '{}' ===".format(self._bootimg))

        # From the output dir, checks there is 'ramdisk' or 'vendor_ramdisk'.
        ramdisk = os.path.join(self._bootimg_dir, 'ramdisk')
        vendor_ramdisk = os.path.join(self._bootimg_dir, 'vendor_ramdisk')
        vendor_ramdisks = self._get_vendor_ramdisks()
        if os.path.exists(ramdisk):
            self._ramdisk = RamdiskImage(ramdisk)
            self._bootimg_type = BootImageType.BOOT_IMAGE
        elif os.path.exists(vendor_ramdisk):
            self._ramdisk = RamdiskImage(vendor_ramdisk)
            self._bootimg_type = BootImageType.VENDOR_BOOT_IMAGE
        elif len(vendor_ramdisks) == 1:
            self._ramdisk = RamdiskImage(vendor_ramdisks[0])
            self._bootimg_type = BootImageType.SINGLE_RAMDISK_FRAGMENT
        elif len(vendor_ramdisks) > 1:
            # Creates an empty RamdiskImage() below, without unpack.
            # We'll then add files into this newly created ramdisk, then pack
            # it with other vendor ramdisks together.
            self._ramdisk = RamdiskImage(ramdisk_img=None, unpack=False)
            self._bootimg_type = BootImageType.MULTIPLE_RAMDISK_FRAGMENTS
        else:
            raise RuntimeError('Both ramdisk and vendor_ramdisk do not exist.')

    def repack_bootimg(self):
        """Repacks the ramdisk and rebuild the boot.img"""

        new_ramdisk = self._temp_file_manager.make_temp_file(
            prefix='ramdisk-patched')
        self._ramdisk.repack_ramdisk(new_ramdisk)

        mkbootimg_cmd = ['mkbootimg']

        # Uses previous mkbootimg args, e.g., --vendor_cmdline, --dtb_offset.
        mkbootimg_cmd.extend(self._previous_mkbootimg_args)

        # Choose which ramdisk flag to rewrite based on the image layout.
        ramdisk_option = ''
        if self._bootimg_type == BootImageType.BOOT_IMAGE:
            ramdisk_option = '--ramdisk'
            mkbootimg_cmd.extend(['--output', self._bootimg])
        elif self._bootimg_type == BootImageType.VENDOR_BOOT_IMAGE:
            ramdisk_option = '--vendor_ramdisk'
            mkbootimg_cmd.extend(['--vendor_boot', self._bootimg])
        elif self._bootimg_type == BootImageType.SINGLE_RAMDISK_FRAGMENT:
            ramdisk_option = '--vendor_ramdisk_fragment'
            mkbootimg_cmd.extend(['--vendor_boot', self._bootimg])
        elif self._bootimg_type == BootImageType.MULTIPLE_RAMDISK_FRAGMENTS:
            # No existing flag to rewrite: append the new ramdisk as an extra
            # fragment with a timestamped name instead.
            mkbootimg_cmd.extend(['--ramdisk_type', 'PLATFORM'])
            ramdisk_name = (
                'RAMDISK_' +
                datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
            mkbootimg_cmd.extend(['--ramdisk_name', ramdisk_name])
            mkbootimg_cmd.extend(['--vendor_ramdisk_fragment', new_ramdisk])
            mkbootimg_cmd.extend(['--vendor_boot', self._bootimg])

        if ramdisk_option and ramdisk_option not in mkbootimg_cmd:
            raise RuntimeError("Failed to find '{}' from:\n {}".format(
                ramdisk_option, shlex.join(mkbootimg_cmd)))
        # Replaces the original ramdisk with the newly packed ramdisk.
        if ramdisk_option:
            ramdisk_index = mkbootimg_cmd.index(ramdisk_option) + 1
            mkbootimg_cmd[ramdisk_index] = new_ramdisk

        subprocess.check_call(mkbootimg_cmd)
        print("=== Repacked boot image: '{}' ===".format(self._bootimg))

    def add_files(self, copy_pairs):
        """Copy files specified by copy_pairs into current ramdisk.

        Args:
            copy_pairs: a list of (src_pathname, dst_file) pairs.
        """
        # Creates missing parent dirs with 0o755.
        original_mask = os.umask(0o022)
        for src_pathname, dst_file in copy_pairs:
            dst_pathname = os.path.join(self.ramdisk_dir, dst_file)
            dst_dir = os.path.dirname(dst_pathname)
            if not os.path.exists(dst_dir):
                print("Creating dir '{}'".format(dst_dir))
                os.makedirs(dst_dir, 0o755)
            print(f"Copying file '{src_pathname}' to '{dst_pathname}'")
            shutil.copy2(src_pathname, dst_pathname, follow_symlinks=False)
        os.umask(original_mask)

    @property
    def ramdisk_dir(self):
        """Returns the internal ramdisk dir."""
        return self._ramdisk.ramdisk_dir
||||||
|
|
||||||
|
|
||||||
|
def _get_repack_usage():
    """Returns the usage-examples epilog for the argument parser."""
    return """Usage examples:

  * --ramdisk_add SRC_FILE:DST_FILE

    If --local is given, copy SRC_FILE from the local filesystem to DST_FILE in
    the ramdisk of --dst_bootimg.
    If --src_bootimg is specified, copy SRC_FILE from the ramdisk of
    --src_bootimg to DST_FILE in the ramdisk of --dst_bootimg.

    Copies a local file 'userdebug_plat_sepolicy.cil' into the ramdisk of
    --dst_bootimg, and then rebuild --dst_bootimg:

      $ %(prog)s \\
          --local --dst_bootimg vendor_boot-debug.img \\
          --ramdisk_add userdebug_plat_sepolicy.cil:userdebug_plat_sepolicy.cil

    Copies 'first_stage_ramdisk/userdebug_plat_sepolicy.cil' from the ramdisk
    of --src_bootimg to 'userdebug_plat_sepolicy.cil' in the ramdisk of
    --dst_bootimg, and then rebuild --dst_bootimg:

      $ %(prog)s \\
          --src_bootimg boot-debug-5.4.img --dst_bootimg vendor_boot-debug.img \\
          --ramdisk_add first_stage_ramdisk/userdebug_plat_sepolicy.cil:userdebug_plat_sepolicy.cil

    This option can be specified multiple times to copy multiple files:

      $ %(prog)s \\
          --local --dst_bootimg vendor_boot-debug.img \\
          --ramdisk_add file1:path/in/dst_bootimg/file1 \\
          --ramdisk_add file2:path/in/dst_bootimg/file2
"""
||||||
|
|
||||||
|
|
||||||
|
def _parse_args():
    """Parse command-line options.

    Returns:
        An argparse.Namespace whose ramdisk_add has been normalized into a
        list of (src_pathname, dst_file) copy pairs.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Repacks boot, recovery or vendor_boot image by importing '
                    'ramdisk files from --src_bootimg to --dst_bootimg.',
        epilog=_get_repack_usage(),
    )

    # type=BootImage means the images are unpacked during argument parsing.
    src_group = parser.add_mutually_exclusive_group(required=True)
    src_group.add_argument(
        '--src_bootimg', help='filename to source boot image',
        type=BootImage)
    src_group.add_argument(
        '--local', help='use local files as repack source',
        action='store_true')

    parser.add_argument(
        '--dst_bootimg', help='filename to destination boot image',
        type=BootImage, required=True)
    parser.add_argument(
        '--ramdisk_add', metavar='SRC_FILE:DST_FILE',
        help='a copy pair to copy into the ramdisk of --dst_bootimg',
        action='extend', nargs='+', required=True)

    args = parser.parse_args()

    # Parse args.ramdisk_add to a list of copy pairs.
    if args.src_bootimg:
        args.ramdisk_add = [
            _parse_ramdisk_copy_pair(p, args.src_bootimg.ramdisk_dir)
            for p in args.ramdisk_add
        ]
    else:
        # Repack from local files.
        args.ramdisk_add = [
            _parse_ramdisk_copy_pair(p) for p in args.ramdisk_add
        ]

    return args
||||||
|
|
||||||
|
|
||||||
|
def _parse_ramdisk_copy_pair(pair, src_ramdisk_dir=None):
|
||||||
|
"""Parse a ramdisk copy pair argument."""
|
||||||
|
if ':' in pair:
|
||||||
|
src_file, dst_file = pair.split(':', maxsplit=1)
|
||||||
|
else:
|
||||||
|
src_file = dst_file = pair
|
||||||
|
|
||||||
|
# os.path.join() only works on relative path components.
|
||||||
|
# If a component is an absolute path, all previous components are thrown
|
||||||
|
# away and joining continues from the absolute path component.
|
||||||
|
# So make sure the file name is not absolute before calling os.path.join().
|
||||||
|
if src_ramdisk_dir:
|
||||||
|
if os.path.isabs(src_file):
|
||||||
|
raise ValueError('file name cannot be absolute when repacking from '
|
||||||
|
'a ramdisk: ' + src_file)
|
||||||
|
src_pathname = os.path.join(src_ramdisk_dir, src_file)
|
||||||
|
else:
|
||||||
|
src_pathname = src_file
|
||||||
|
if os.path.isabs(dst_file):
|
||||||
|
raise ValueError('destination file name cannot be absolute: ' +
|
||||||
|
dst_file)
|
||||||
|
return (src_pathname, dst_file)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Parse arguments and repack boot image."""
    args = _parse_args()
    # Copy the requested files into the destination ramdisk, then rebuild it.
    args.dst_bootimg.add_files(args.ramdisk_add)
    args.dst_bootimg.repack_bootimg()
||||||
|
|
||||||
|
|
||||||
|
# Script entry point; importing this module does not repack anything.
if __name__ == '__main__':
    main()
|
563
kernel_build/mkbootimg/unpack_bootimg.py
Executable file
563
kernel_build/mkbootimg/unpack_bootimg.py
Executable file
|
@ -0,0 +1,563 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright 2018, The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Unpacks the boot image.
|
||||||
|
|
||||||
|
Extracts the kernel, ramdisk, second bootloader, dtb and recovery dtbo images.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from argparse import ArgumentParser, RawDescriptionHelpFormatter
|
||||||
|
from struct import unpack
|
||||||
|
import os
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
|
||||||
|
VENDOR_RAMDISK_NAME_SIZE = 32
|
||||||
|
VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
|
||||||
|
|
||||||
|
|
||||||
|
def create_out_dir(dir_path):
    """Create directory `dir_path` (including parents) if it does not exist."""
    if os.path.exists(dir_path):
        return
    os.makedirs(dir_path)
|
||||||
|
|
||||||
|
|
||||||
|
def extract_image(offset, size, bootimage, extracted_image_name):
    """Copy `size` bytes at `offset` from the open `bootimage` file object
    into a newly created file named `extracted_image_name`."""
    bootimage.seek(offset)
    payload = bootimage.read(size)
    with open(extracted_image_name, 'wb') as out:
        out.write(payload)
|
||||||
|
|
||||||
|
|
||||||
|
def get_number_of_pages(image_size, page_size):
    """Return the number of whole pages needed to hold `image_size` bytes
    (ceiling division)."""
    return -(-image_size // page_size)
|
||||||
|
|
||||||
|
|
||||||
|
def cstr(s):
    """Return the portion of `s` before the first NUL character
    (C-string semantics for fixed-size decoded header fields)."""
    return s.partition('\0')[0]
|
||||||
|
|
||||||
|
|
||||||
|
def format_os_version(os_version):
    """Decode the packed Android OS version field into an 'A.B.C' string.

    The field packs three 7-bit components: A in bits 14+, B in bits 7-13,
    C in bits 0-6. A value of 0 means "unspecified" and yields None.
    """
    if not os_version:
        return None
    major = os_version >> 14
    minor = (os_version >> 7) & 0x7f
    patch = os_version & 0x7f
    return f'{major}.{minor}.{patch}'
|
||||||
|
|
||||||
|
|
||||||
|
def format_os_patch_level(os_patch_level):
    """Decode the packed security patch level into a 'YYYY-MM' string.

    The field packs (year - 2000) in bits 4+ and the month in bits 0-3.
    A value of 0 means "unspecified" and yields None.
    """
    if not os_patch_level:
        return None
    year = 2000 + (os_patch_level >> 4)
    month = os_patch_level & 0xf
    return f'{year:04d}-{month:02d}'
|
||||||
|
|
||||||
|
|
||||||
|
def decode_os_version_patch_level(os_version_patch_level):
    """Split the packed 32-bit os_version field of the boot header into a
    (os_version, os_patch_level) tuple of formatted strings (or None)."""
    version_bits = os_version_patch_level >> 11
    patch_bits = os_version_patch_level & 0x7ff
    return (format_os_version(version_bits),
            format_os_patch_level(patch_bits))
|
||||||
|
|
||||||
|
|
||||||
|
class BootImageInfoFormatter:
    """Formats the boot image info.

    Attributes are assigned externally by unpack_boot_image(); which
    attributes exist depends on self.header_version (v0-v2 carry load
    addresses, product name and extra cmdline; v3+ do not).
    """

    def format_pretty_text(self):
        """Return a human-readable multi-line description of the header."""
        lines = []
        lines.append(f'boot magic: {self.boot_magic}')

        if self.header_version < 3:
            lines.append(f'kernel_size: {self.kernel_size}')
            lines.append(
                f'kernel load address: {self.kernel_load_address:#010x}')
            lines.append(f'ramdisk size: {self.ramdisk_size}')
            lines.append(
                f'ramdisk load address: {self.ramdisk_load_address:#010x}')
            lines.append(f'second bootloader size: {self.second_size}')
            lines.append(
                f'second bootloader load address: '
                f'{self.second_load_address:#010x}')
            lines.append(
                f'kernel tags load address: {self.tags_load_address:#010x}')
            lines.append(f'page size: {self.page_size}')
        else:
            # v3+ headers only carry the two sizes; load addresses are fixed.
            lines.append(f'kernel_size: {self.kernel_size}')
            lines.append(f'ramdisk size: {self.ramdisk_size}')

        lines.append(f'os version: {self.os_version}')
        lines.append(f'os patch level: {self.os_patch_level}')
        lines.append(f'boot image header version: {self.header_version}')

        if self.header_version < 3:
            lines.append(f'product name: {self.product_name}')

        lines.append(f'command line args: {self.cmdline}')

        if self.header_version < 3:
            lines.append(f'additional command line args: {self.extra_cmdline}')

        if self.header_version in {1, 2}:
            lines.append(f'recovery dtbo size: {self.recovery_dtbo_size}')
            lines.append(
                f'recovery dtbo offset: {self.recovery_dtbo_offset:#018x}')
            lines.append(f'boot header size: {self.boot_header_size}')

        if self.header_version == 2:
            lines.append(f'dtb size: {self.dtb_size}')
            lines.append(f'dtb address: {self.dtb_load_address:#018x}')

        if self.header_version >= 4:
            lines.append(
                f'boot.img signature size: {self.boot_signature_size}')

        return '\n'.join(lines)

    def format_mkbootimg_argument(self):
        """Return a list of mkbootimg arguments that reconstruct the image
        from the files previously extracted into self.image_dir."""
        args = []
        args.extend(['--header_version', str(self.header_version)])
        if self.os_version:
            args.extend(['--os_version', self.os_version])
        if self.os_patch_level:
            args.extend(['--os_patch_level', self.os_patch_level])

        args.extend(['--kernel', os.path.join(self.image_dir, 'kernel')])
        args.extend(['--ramdisk', os.path.join(self.image_dir, 'ramdisk')])

        if self.header_version <= 2:
            if self.second_size > 0:
                args.extend(['--second',
                             os.path.join(self.image_dir, 'second')])
            if self.recovery_dtbo_size > 0:
                args.extend(['--recovery_dtbo',
                             os.path.join(self.image_dir, 'recovery_dtbo')])
            if self.dtb_size > 0:
                args.extend(['--dtb', os.path.join(self.image_dir, 'dtb')])

            args.extend(['--pagesize', f'{self.page_size:#010x}'])

            # Kernel load address is base + kernel_offset in mkbootimg.py.
            # However we don't know the value of 'base' when unpacking a boot
            # image in this script, so we set 'base' to zero and 'kernel_offset'
            # to the kernel load address, 'ramdisk_offset' to the ramdisk load
            # address, ... etc.
            args.extend(['--base', f'{0:#010x}'])
            args.extend(['--kernel_offset',
                         f'{self.kernel_load_address:#010x}'])
            args.extend(['--ramdisk_offset',
                         f'{self.ramdisk_load_address:#010x}'])
            args.extend(['--second_offset',
                         f'{self.second_load_address:#010x}'])
            args.extend(['--tags_offset', f'{self.tags_load_address:#010x}'])

            # dtb is added in boot image v2, and is absent in v1 or v0.
            if self.header_version == 2:
                # dtb_offset is uint64_t.
                args.extend(['--dtb_offset', f'{self.dtb_load_address:#018x}'])

            args.extend(['--board', self.product_name])
            args.extend(['--cmdline', self.cmdline + self.extra_cmdline])
        else:
            args.extend(['--cmdline', self.cmdline])

        return args
|
||||||
|
|
||||||
|
|
||||||
|
def unpack_boot_image(boot_img, output_dir):
    """extracts kernel, ramdisk, second bootloader and recovery dtbo

    Reads the 'ANDROID!' boot image header sequentially from the open file
    object `boot_img`, extracts each payload into `output_dir`, and returns
    a populated BootImageInfoFormatter.
    """
    info = BootImageInfoFormatter()
    info.boot_magic = unpack('8s', boot_img.read(8))[0].decode()

    # The first nine u32 words follow the magic in every header version.
    kernel_ramdisk_second_info = unpack('9I', boot_img.read(9 * 4))
    # header_version is always at [8] regardless of the value of header_version.
    info.header_version = kernel_ramdisk_second_info[8]

    if info.header_version < 3:
        info.kernel_size = kernel_ramdisk_second_info[0]
        info.kernel_load_address = kernel_ramdisk_second_info[1]
        info.ramdisk_size = kernel_ramdisk_second_info[2]
        info.ramdisk_load_address = kernel_ramdisk_second_info[3]
        info.second_size = kernel_ramdisk_second_info[4]
        info.second_load_address = kernel_ramdisk_second_info[5]
        info.tags_load_address = kernel_ramdisk_second_info[6]
        info.page_size = kernel_ramdisk_second_info[7]
        os_version_patch_level = unpack('I', boot_img.read(1 * 4))[0]
    else:
        # v3+ drops second bootloader and fixes the page size.
        info.kernel_size = kernel_ramdisk_second_info[0]
        info.ramdisk_size = kernel_ramdisk_second_info[1]
        os_version_patch_level = kernel_ramdisk_second_info[2]
        info.second_size = 0
        info.page_size = BOOT_IMAGE_HEADER_V3_PAGESIZE

    info.os_version, info.os_patch_level = decode_os_version_patch_level(
        os_version_patch_level)

    if info.header_version < 3:
        info.product_name = cstr(unpack('16s',
                                        boot_img.read(16))[0].decode())
        info.cmdline = cstr(unpack('512s', boot_img.read(512))[0].decode())
        boot_img.read(32)  # ignore SHA
        info.extra_cmdline = cstr(unpack('1024s',
                                         boot_img.read(1024))[0].decode())
    else:
        # v3+ merges cmdline and extra_cmdline into one 1536-byte field.
        info.cmdline = cstr(unpack('1536s',
                                   boot_img.read(1536))[0].decode())

    if info.header_version in {1, 2}:
        info.recovery_dtbo_size = unpack('I', boot_img.read(1 * 4))[0]
        info.recovery_dtbo_offset = unpack('Q', boot_img.read(8))[0]
        info.boot_header_size = unpack('I', boot_img.read(4))[0]
    else:
        info.recovery_dtbo_size = 0

    if info.header_version == 2:
        info.dtb_size = unpack('I', boot_img.read(4))[0]
        info.dtb_load_address = unpack('Q', boot_img.read(8))[0]
    else:
        info.dtb_size = 0
        info.dtb_load_address = 0

    if info.header_version >= 4:
        info.boot_signature_size = unpack('I', boot_img.read(4))[0]
    else:
        info.boot_signature_size = 0

    # The first page contains the boot header
    num_header_pages = 1

    # Convenient shorthand.
    page_size = info.page_size

    # Payloads are page-aligned, in order: kernel, ramdisk, second,
    # recovery_dtbo, dtb; v4 appends the boot signature.
    num_kernel_pages = get_number_of_pages(info.kernel_size, page_size)
    kernel_offset = page_size * num_header_pages  # header occupies a page
    image_info_list = [(kernel_offset, info.kernel_size, 'kernel')]

    num_ramdisk_pages = get_number_of_pages(info.ramdisk_size, page_size)
    ramdisk_offset = page_size * (num_header_pages + num_kernel_pages
                                  )  # header + kernel
    image_info_list.append((ramdisk_offset, info.ramdisk_size, 'ramdisk'))

    if info.second_size > 0:
        second_offset = page_size * (
            num_header_pages + num_kernel_pages + num_ramdisk_pages
        )  # header + kernel + ramdisk
        image_info_list.append((second_offset, info.second_size, 'second'))

    if info.recovery_dtbo_size > 0:
        # recovery_dtbo_offset is stored in the header, not derived.
        image_info_list.append((info.recovery_dtbo_offset,
                                info.recovery_dtbo_size,
                                'recovery_dtbo'))
    if info.dtb_size > 0:
        num_second_pages = get_number_of_pages(info.second_size, page_size)
        num_recovery_dtbo_pages = get_number_of_pages(
            info.recovery_dtbo_size, page_size)
        dtb_offset = page_size * (
            num_header_pages + num_kernel_pages + num_ramdisk_pages +
            num_second_pages + num_recovery_dtbo_pages)

        image_info_list.append((dtb_offset, info.dtb_size, 'dtb'))

    if info.boot_signature_size > 0:
        # boot signature only exists in boot.img version >= v4.
        # There are only kernel and ramdisk pages before the signature.
        boot_signature_offset = page_size * (
            num_header_pages + num_kernel_pages + num_ramdisk_pages)

        image_info_list.append((boot_signature_offset, info.boot_signature_size,
                                'boot_signature'))

    create_out_dir(output_dir)
    for offset, size, name in image_info_list:
        extract_image(offset, size, boot_img, os.path.join(output_dir, name))
    info.image_dir = output_dir

    return info
|
||||||
|
|
||||||
|
|
||||||
|
class VendorBootImageInfoFormatter:
    """Formats the vendor_boot image info.

    Attributes are assigned externally by unpack_vendor_boot_image();
    v4-only attributes (ramdisk table, bootconfig) exist only when
    self.header_version > 3.
    """

    def format_pretty_text(self):
        """Return a human-readable multi-line description of the header."""
        lines = []
        lines.append(f'boot magic: {self.boot_magic}')
        lines.append(f'vendor boot image header version: {self.header_version}')
        lines.append(f'page size: {self.page_size:#010x}')
        lines.append(f'kernel load address: {self.kernel_load_address:#010x}')
        lines.append(f'ramdisk load address: {self.ramdisk_load_address:#010x}')
        if self.header_version > 3:
            lines.append(
                f'vendor ramdisk total size: {self.vendor_ramdisk_size}')
        else:
            lines.append(f'vendor ramdisk size: {self.vendor_ramdisk_size}')
        lines.append(f'vendor command line args: {self.cmdline}')
        lines.append(
            f'kernel tags load address: {self.tags_load_address:#010x}')
        lines.append(f'product name: {self.product_name}')
        lines.append(f'vendor boot image header size: {self.header_size}')
        lines.append(f'dtb size: {self.dtb_size}')
        lines.append(f'dtb address: {self.dtb_load_address:#018x}')
        if self.header_version > 3:
            # v4: dump each ramdisk-table entry in a brace-indented layout.
            lines.append(
                f'vendor ramdisk table size: {self.vendor_ramdisk_table_size}')
            lines.append('vendor ramdisk table: [')
            indent = lambda level: ' ' * 4 * level
            for entry in self.vendor_ramdisk_table:
                (output_ramdisk_name, ramdisk_size, ramdisk_offset,
                 ramdisk_type, ramdisk_name, board_id) = entry
                lines.append(indent(1) + f'{output_ramdisk_name}: ''{')
                lines.append(indent(2) + f'size: {ramdisk_size}')
                lines.append(indent(2) + f'offset: {ramdisk_offset}')
                lines.append(indent(2) + f'type: {ramdisk_type:#x}')
                lines.append(indent(2) + f'name: {ramdisk_name}')
                lines.append(indent(2) + 'board_id: [')
                # Print board_id words four per row.
                stride = 4
                for row_idx in range(0, len(board_id), stride):
                    row = board_id[row_idx:row_idx + stride]
                    lines.append(
                        indent(3) + ' '.join(f'{e:#010x},' for e in row))
                lines.append(indent(2) + ']')
                lines.append(indent(1) + '}')
            lines.append(']')
            lines.append(
                f'vendor bootconfig size: {self.vendor_bootconfig_size}')

        return '\n'.join(lines)

    def format_mkbootimg_argument(self):
        """Return a list of mkbootimg arguments that reconstruct the
        vendor_boot image from the files extracted into self.image_dir."""
        args = []
        args.extend(['--header_version', str(self.header_version)])
        args.extend(['--pagesize', f'{self.page_size:#010x}'])
        # base is unknown when unpacking; use 0 and pass load addresses
        # directly as the per-component offsets.
        args.extend(['--base', f'{0:#010x}'])
        args.extend(['--kernel_offset', f'{self.kernel_load_address:#010x}'])
        args.extend(['--ramdisk_offset', f'{self.ramdisk_load_address:#010x}'])
        args.extend(['--tags_offset', f'{self.tags_load_address:#010x}'])
        args.extend(['--dtb_offset', f'{self.dtb_load_address:#018x}'])
        args.extend(['--vendor_cmdline', self.cmdline])
        args.extend(['--board', self.product_name])

        if self.dtb_size > 0:
            args.extend(['--dtb', os.path.join(self.image_dir, 'dtb')])

        if self.header_version > 3:
            args.extend(['--vendor_bootconfig',
                         os.path.join(self.image_dir, 'bootconfig')])

            # Emit one --vendor_ramdisk_fragment group per table entry;
            # zero board_id words are omitted.
            for entry in self.vendor_ramdisk_table:
                (output_ramdisk_name, _, _, ramdisk_type,
                 ramdisk_name, board_id) = entry
                args.extend(['--ramdisk_type', str(ramdisk_type)])
                args.extend(['--ramdisk_name', ramdisk_name])
                for idx, e in enumerate(board_id):
                    if e:
                        args.extend([f'--board_id{idx}', f'{e:#010x}'])
                vendor_ramdisk_path = os.path.join(
                    self.image_dir, output_ramdisk_name)
                args.extend(['--vendor_ramdisk_fragment', vendor_ramdisk_path])
        else:
            args.extend(['--vendor_ramdisk',
                         os.path.join(self.image_dir, 'vendor_ramdisk')])

        return args
|
||||||
|
|
||||||
|
|
||||||
|
def unpack_vendor_boot_image(boot_img, output_dir):
    """Extracts the vendor ramdisk(s), dtb and (v4) bootconfig from an open
    'VNDRBOOT' image file object into `output_dir`, and returns a populated
    VendorBootImageInfoFormatter."""
    info = VendorBootImageInfoFormatter()
    # Sequential header reads; order must match the vendor_boot layout.
    info.boot_magic = unpack('8s', boot_img.read(8))[0].decode()
    info.header_version = unpack('I', boot_img.read(4))[0]
    info.page_size = unpack('I', boot_img.read(4))[0]
    info.kernel_load_address = unpack('I', boot_img.read(4))[0]
    info.ramdisk_load_address = unpack('I', boot_img.read(4))[0]
    info.vendor_ramdisk_size = unpack('I', boot_img.read(4))[0]
    info.cmdline = cstr(unpack('2048s', boot_img.read(2048))[0].decode())
    info.tags_load_address = unpack('I', boot_img.read(4))[0]
    info.product_name = cstr(unpack('16s', boot_img.read(16))[0].decode())
    info.header_size = unpack('I', boot_img.read(4))[0]
    info.dtb_size = unpack('I', boot_img.read(4))[0]
    info.dtb_load_address = unpack('Q', boot_img.read(8))[0]

    # Convenient shorthand.
    page_size = info.page_size
    # The first pages contain the boot header
    num_boot_header_pages = get_number_of_pages(info.header_size, page_size)
    num_boot_ramdisk_pages = get_number_of_pages(
        info.vendor_ramdisk_size, page_size)
    num_boot_dtb_pages = get_number_of_pages(info.dtb_size, page_size)

    ramdisk_offset_base = page_size * num_boot_header_pages
    image_info_list = []

    if info.header_version > 3:
        # v4 appends ramdisk-table and bootconfig fields to the header.
        info.vendor_ramdisk_table_size = unpack('I', boot_img.read(4))[0]
        vendor_ramdisk_table_entry_num = unpack('I', boot_img.read(4))[0]
        vendor_ramdisk_table_entry_size = unpack('I', boot_img.read(4))[0]
        info.vendor_bootconfig_size = unpack('I', boot_img.read(4))[0]
        num_vendor_ramdisk_table_pages = get_number_of_pages(
            info.vendor_ramdisk_table_size, page_size)
        vendor_ramdisk_table_offset = page_size * (
            num_boot_header_pages + num_boot_ramdisk_pages + num_boot_dtb_pages)

        vendor_ramdisk_table = []
        vendor_ramdisk_symlinks = []
        for idx in range(vendor_ramdisk_table_entry_num):
            # Seek to each fixed-size table entry explicitly so entry_size
            # larger than the fields we read is handled correctly.
            entry_offset = vendor_ramdisk_table_offset + (
                vendor_ramdisk_table_entry_size * idx)
            boot_img.seek(entry_offset)
            ramdisk_size = unpack('I', boot_img.read(4))[0]
            ramdisk_offset = unpack('I', boot_img.read(4))[0]
            ramdisk_type = unpack('I', boot_img.read(4))[0]
            ramdisk_name = cstr(unpack(
                f'{VENDOR_RAMDISK_NAME_SIZE}s',
                boot_img.read(VENDOR_RAMDISK_NAME_SIZE))[0].decode())
            board_id = unpack(
                f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}I',
                boot_img.read(
                    4 * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE))
            output_ramdisk_name = f'vendor_ramdisk{idx:02}'

            image_info_list.append((ramdisk_offset_base + ramdisk_offset,
                                    ramdisk_size, output_ramdisk_name))
            vendor_ramdisk_symlinks.append((output_ramdisk_name, ramdisk_name))
            vendor_ramdisk_table.append(
                (output_ramdisk_name, ramdisk_size, ramdisk_offset,
                 ramdisk_type, ramdisk_name, board_id))

        info.vendor_ramdisk_table = vendor_ramdisk_table

        bootconfig_offset = page_size * (num_boot_header_pages
            + num_boot_ramdisk_pages + num_boot_dtb_pages
            + num_vendor_ramdisk_table_pages)
        image_info_list.append((bootconfig_offset, info.vendor_bootconfig_size,
                                'bootconfig'))
    else:
        # v3: a single monolithic vendor ramdisk right after the header.
        image_info_list.append(
            (ramdisk_offset_base, info.vendor_ramdisk_size, 'vendor_ramdisk'))

    dtb_offset = page_size * (num_boot_header_pages + num_boot_ramdisk_pages
                              )  # header + vendor_ramdisk
    if info.dtb_size > 0:
        image_info_list.append((dtb_offset, info.dtb_size, 'dtb'))

    create_out_dir(output_dir)
    for offset, size, name in image_info_list:
        extract_image(offset, size, boot_img, os.path.join(output_dir, name))
    info.image_dir = output_dir

    if info.header_version > 3:
        # Recreate name-based symlinks (vendor-ramdisk-by-name/ramdisk_<name>)
        # pointing at the numbered fragment files.
        vendor_ramdisk_by_name_dir = os.path.join(
            output_dir, 'vendor-ramdisk-by-name')
        create_out_dir(vendor_ramdisk_by_name_dir)
        for src, dst in vendor_ramdisk_symlinks:
            src_pathname = os.path.join('..', src)
            dst_pathname = os.path.join(
                vendor_ramdisk_by_name_dir, f'ramdisk_{dst}')
            if os.path.lexists(dst_pathname):
                os.remove(dst_pathname)
            os.symlink(src_pathname, dst_pathname)

    return info
|
||||||
|
|
||||||
|
|
||||||
|
def unpack_bootimg(boot_img, output_dir):
    """Unpacks the |boot_img| to |output_dir|, and returns the 'info' object.

    Dispatches on the 8-byte magic: 'ANDROID!' for boot/recovery images,
    'VNDRBOOT' for vendor_boot images; anything else raises ValueError.
    """
    with open(boot_img, 'rb') as image_file:
        boot_magic = unpack('8s', image_file.read(8))[0].decode()
        image_file.seek(0)  # unpackers re-read the magic themselves
        if boot_magic == 'ANDROID!':
            return unpack_boot_image(image_file, output_dir)
        if boot_magic == 'VNDRBOOT':
            return unpack_vendor_boot_image(image_file, output_dir)
        raise ValueError(f'Not an Android boot image, magic: {boot_magic}')
|
||||||
|
|
||||||
|
|
||||||
|
def print_bootimg_info(info, output_format, null_separator):
    """Format and print boot image info.

    'mkbootimg' format prints the reconstruction arguments, either
    shell-quoted on one line or NUL-terminated (for xargs -0) when
    null_separator is set; any other format prints the pretty text.
    """
    if output_format != 'mkbootimg':
        print(info.format_pretty_text())
        return
    mkbootimg_args = info.format_mkbootimg_argument()
    if null_separator:
        # Trailing NUL so every argument, including the last, is terminated.
        print('\0'.join(mkbootimg_args) + '\0', end='')
    else:
        print(shlex.join(mkbootimg_args))
|
||||||
|
|
||||||
|
|
||||||
|
def get_unpack_usage():
    # Epilog text shown by `unpack_bootimg --help`, documenting the two
    # --format modes ('info' and 'mkbootimg') with shell usage examples.
    return """Output format:

* info

  Pretty-printed info-rich text format suitable for human inspection.

* mkbootimg

  Output shell-escaped (quoted) argument strings that can be used to
  reconstruct the boot image. For example:

  $ unpack_bootimg --boot_img vendor_boot.img --out out --format=mkbootimg |
      tee mkbootimg_args
  $ sh -c "mkbootimg $(cat mkbootimg_args) --vendor_boot repacked.img"

  vendor_boot.img and repacked.img would be equivalent.

  If the -0 option is specified, output unescaped null-terminated argument
  strings that are suitable to be parsed by a shell script (xargs -0 format):

  $ unpack_bootimg --boot_img vendor_boot.img --out out --format=mkbootimg \\
      -0 | tee mkbootimg_args
  $ declare -a MKBOOTIMG_ARGS=()
  $ while IFS= read -r -d '' ARG; do
      MKBOOTIMG_ARGS+=("${ARG}")
    done <mkbootimg_args
  $ mkbootimg "${MKBOOTIMG_ARGS[@]}" --vendor_boot repacked.img
"""
|
||||||
|
|
||||||
|
|
||||||
|
def parse_cmdline():
    """parse command line arguments

    Returns the argparse.Namespace; note that '-0'/'--null' is stored on
    the attribute 'null'.
    """
    parser = ArgumentParser(
        # Raw formatter so the pre-formatted usage epilog keeps its layout.
        formatter_class=RawDescriptionHelpFormatter,
        description='Unpacks boot, recovery or vendor_boot image.',
        epilog=get_unpack_usage(),
    )
    parser.add_argument('--boot_img', required=True,
                        help='path to the boot, recovery or vendor_boot image')
    parser.add_argument('--out', default='out',
                        help='output directory of the unpacked images')
    parser.add_argument('--format', choices=['info', 'mkbootimg'],
                        default='info',
                        help='text output format (default: info)')
    parser.add_argument('-0', '--null', action='store_true',
                        help='output null-terminated argument strings')
    return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Entry point: parse arguments, unpack the image, print its info."""
    args = parse_cmdline()
    info = unpack_bootimg(args.boot_img, args.out)
    print_bootimg_info(info, args.format, args.null)


if __name__ == '__main__':
    main()
|
298
kernel_build/vboot_dlkm/modules.load
Normal file
298
kernel_build/vboot_dlkm/modules.load
Normal file
|
@ -0,0 +1,298 @@
|
||||||
|
exynos-chipid_v2.ko
|
||||||
|
exynos-reboot.ko
|
||||||
|
exynos_mct.ko
|
||||||
|
exynos_tty.ko
|
||||||
|
clk_exynos.ko
|
||||||
|
i2c-exynos5.ko
|
||||||
|
ems.ko
|
||||||
|
zsmalloc.ko
|
||||||
|
lzo.ko
|
||||||
|
lzo-rle.ko
|
||||||
|
ssg.ko
|
||||||
|
blk-sec-stats.ko
|
||||||
|
phy-exynos-mipi-dsim.ko
|
||||||
|
phy-exynos-mipi.ko
|
||||||
|
phy-exynos-usbdrd-super.ko
|
||||||
|
pinctrl-samsung-s2mpu13.ko
|
||||||
|
pinctrl-samsung-s2mpu14.ko
|
||||||
|
pinctrl-samsung-core.ko
|
||||||
|
pwm-samsung.ko
|
||||||
|
clk-exynos-audss.ko
|
||||||
|
pl330.ko
|
||||||
|
samsung-dma.ko
|
||||||
|
dss.ko
|
||||||
|
debug-snapshot-debug-kinfo.ko
|
||||||
|
debug-snapshot-sfrdump.ko
|
||||||
|
debug-snapshot-qd.ko
|
||||||
|
exynos-coresight.ko
|
||||||
|
exynos-ecc-handler.ko
|
||||||
|
hardlockup-watchdog.ko
|
||||||
|
exynos-itmon.ko
|
||||||
|
exynos-adv-tracer.ko
|
||||||
|
exynos-adv-tracer-s2d.ko
|
||||||
|
ehld.ko
|
||||||
|
exynos-ssld.ko
|
||||||
|
exynos-pmu-if.ko
|
||||||
|
exynos-flexpmu-dbg.ko
|
||||||
|
exynos-pd.ko
|
||||||
|
exynos-pd_el3.ko
|
||||||
|
exynos-pd-dbg.ko
|
||||||
|
cmupmucal.ko
|
||||||
|
exynos_acpm.ko
|
||||||
|
plugin_dbg.ko
|
||||||
|
memlogger.ko
|
||||||
|
sysevent.ko
|
||||||
|
sysevent_notif.ko
|
||||||
|
imgloader.ko
|
||||||
|
ect_parser.ko
|
||||||
|
exynos-pm.ko
|
||||||
|
exynos-dm.ko
|
||||||
|
exynos-mcinfo.ko
|
||||||
|
exynos-cpuhp.ko
|
||||||
|
xperf.ko
|
||||||
|
exynos-cpupm.ko
|
||||||
|
exynos-ufcc.ko
|
||||||
|
exynos-afm.ko
|
||||||
|
mcu_ipc.ko
|
||||||
|
shm_ipc.ko
|
||||||
|
exynos_dit.ko
|
||||||
|
cpif_memlogger.ko
|
||||||
|
cpif_page.ko
|
||||||
|
direct_dm.ko
|
||||||
|
hook.ko
|
||||||
|
cpif.ko
|
||||||
|
gnss_mbox.ko
|
||||||
|
gnssif.ko
|
||||||
|
exynos_pm_qos.ko
|
||||||
|
exynos-bcm.ko
|
||||||
|
exynos-ssp.ko
|
||||||
|
exynos-cm.ko
|
||||||
|
exynos-el2.ko
|
||||||
|
exynos-s2mpu.ko
|
||||||
|
exynos-sdm.ko
|
||||||
|
exynos-seclog.ko
|
||||||
|
exynos-tzasc.ko
|
||||||
|
exynos-ppmpu.ko
|
||||||
|
exynos-seh.ko
|
||||||
|
secmem.ko
|
||||||
|
hdcp2.ko
|
||||||
|
exynos-migov.ko
|
||||||
|
exynos-gpu-profiler.ko
|
||||||
|
exynos-cpu-profiler.ko
|
||||||
|
exynos-mif-profiler.ko
|
||||||
|
exynos-wow.ko
|
||||||
|
s2mpu13_regulator.ko
|
||||||
|
s2mpu14_regulator.ko
|
||||||
|
s2mpb03.ko
|
||||||
|
exyswd-rng.ko
|
||||||
|
samsung_iommu.ko
|
||||||
|
samsung-iommu-group.ko
|
||||||
|
samsung-secure-iova.ko
|
||||||
|
exynos-drm.ko
|
||||||
|
mcd-panel-samsung-drv.ko
|
||||||
|
mcd-panel-samsung-helper.ko
|
||||||
|
mali_kbase.ko
|
||||||
|
zram.ko
|
||||||
|
scsc_log_collection.ko
|
||||||
|
scsc_logring.ko
|
||||||
|
scsc_platform_mif.ko
|
||||||
|
scsc_mx.ko
|
||||||
|
mx_client_test.ko
|
||||||
|
scsc_mmap.ko
|
||||||
|
scsc_mx250_fm.ko
|
||||||
|
scsc_bt.ko
|
||||||
|
tzdev.ko
|
||||||
|
tuihw.ko
|
||||||
|
tuihw-inf.ko
|
||||||
|
s2mpu13_mfd.ko
|
||||||
|
s2mpu14_mfd.ko
|
||||||
|
nfc_sec.ko
|
||||||
|
dma-buf-container.ko
|
||||||
|
samsung_dma_heap.ko
|
||||||
|
ufs-exynos-core.ko
|
||||||
|
scsi_srpmb.ko
|
||||||
|
spidev.ko
|
||||||
|
spi-s3c64xx.ko
|
||||||
|
smsc.ko
|
||||||
|
scsc_wlan.ko
|
||||||
|
scsc_wifilogger.ko
|
||||||
|
asix.ko
|
||||||
|
ax88179_178a.ko
|
||||||
|
smsc75xx.ko
|
||||||
|
smsc95xx.ko
|
||||||
|
cdc_mbim.ko
|
||||||
|
dwc3-exynos-usb.ko
|
||||||
|
xhci-exynos.ko
|
||||||
|
usblp.ko
|
||||||
|
cdc-wdm.ko
|
||||||
|
ehset.ko
|
||||||
|
lvstest.ko
|
||||||
|
usb_f_dm.ko
|
||||||
|
usb_f_dm1.ko
|
||||||
|
usb_f_conn_gadget.ko
|
||||||
|
usb_f_ss_mon_gadget.ko
|
||||||
|
usbserial.ko
|
||||||
|
usb_notify_layer.ko
|
||||||
|
usb_notifier.ko
|
||||||
|
s2mpu13-key.ko
|
||||||
|
acecad.ko
|
||||||
|
aiptek.ko
|
||||||
|
gtco.ko
|
||||||
|
hanwang.ko
|
||||||
|
kbtab.ko
|
||||||
|
rtc-s2mpu13.ko
|
||||||
|
acpm-mfd-bus.ko
|
||||||
|
i2c-dev.ko
|
||||||
|
i3c-hci-exynos.ko
|
||||||
|
videobuf2-dma-sg.ko
|
||||||
|
exynos_mfc.ko
|
||||||
|
fimc-is.ko
|
||||||
|
is-cis-gw1.ko
|
||||||
|
is-cis-imx258.ko
|
||||||
|
is-cis-gc5035.ko
|
||||||
|
is-cis-imx616.ko
|
||||||
|
is-cis-imx682.ko
|
||||||
|
is-cis-hi1336.ko
|
||||||
|
is-cis-gd2.ko
|
||||||
|
is-actuator-ak737x.ko
|
||||||
|
is-flash-sm5714.ko
|
||||||
|
pablo-smc.ko
|
||||||
|
is-device-eeprom.ko
|
||||||
|
is-device-otprom.ko
|
||||||
|
camerapp.ko
|
||||||
|
gdc.ko
|
||||||
|
votf.ko
|
||||||
|
mcfrc.ko
|
||||||
|
smfc.ko
|
||||||
|
scaler.ko
|
||||||
|
repeater.ko
|
||||||
|
ifpmic_class.ko
|
||||||
|
exynos_thermal.ko
|
||||||
|
gpu_cooling.ko
|
||||||
|
s3c2410_wdt.ko
|
||||||
|
softdog.ko
|
||||||
|
freq-qos-tracer.ko
|
||||||
|
exynos-acme.ko
|
||||||
|
exynos-dsufreq.ko
|
||||||
|
dw_mmc.ko
|
||||||
|
dw_mmc-pltfm.ko
|
||||||
|
dw_mmc-exynos-sec.ko
|
||||||
|
leds-sm5714-fled.ko
|
||||||
|
hid-a4tech.ko
|
||||||
|
hid-axff.ko
|
||||||
|
hid-belkin.ko
|
||||||
|
hid-cherry.ko
|
||||||
|
hid-chicony.ko
|
||||||
|
hid-cypress.ko
|
||||||
|
hid-dr.ko
|
||||||
|
hid-emsff.ko
|
||||||
|
hid-ezkey.ko
|
||||||
|
hid-gyration.ko
|
||||||
|
hid-holtek-kbd.ko
|
||||||
|
hid-holtek-mouse.ko
|
||||||
|
hid-holtekff.ko
|
||||||
|
hid-kensington.ko
|
||||||
|
hid-keytouch.ko
|
||||||
|
hid-lcpower.ko
|
||||||
|
hid-monterey.ko
|
||||||
|
hid-ntrig.ko
|
||||||
|
hid-ortek.ko
|
||||||
|
hid-pl.ko
|
||||||
|
hid-petalynx.ko
|
||||||
|
hid-primax.ko
|
||||||
|
hid-saitek.ko
|
||||||
|
hid-sjoy.ko
|
||||||
|
hid-speedlink.ko
|
||||||
|
hid-sunplus.ko
|
||||||
|
hid-gaff.ko
|
||||||
|
hid-tmff.ko
|
||||||
|
hid-tivo.ko
|
||||||
|
hid-topseed.ko
|
||||||
|
hid-twinhan.ko
|
||||||
|
hid-zpff.ko
|
||||||
|
hid-zydacron.ko
|
||||||
|
hid-waltop.ko
|
||||||
|
switch_class.ko
|
||||||
|
nanohub.ko
|
||||||
|
exynos_devfreq.ko
|
||||||
|
s2mpu13_adc.ko
|
||||||
|
npu.ko
|
||||||
|
sec_pm_debug.ko
|
||||||
|
sec_thermistor.ko
|
||||||
|
sec_wakeup_cpu_allocator.ko
|
||||||
|
sec_reboot.ko
|
||||||
|
sec_crash_key.ko
|
||||||
|
sec_crash_key_user.ko
|
||||||
|
sec_hard_reset_hook.ko
|
||||||
|
sec_key_notifier.ko
|
||||||
|
sec_class.ko
|
||||||
|
sec_bootstat.ko
|
||||||
|
sec_argos.ko
|
||||||
|
pmic_class.ko
|
||||||
|
mcd-panel-s6e3fc3_a53x.ko
|
||||||
|
dev_ril_bridge.ko
|
||||||
|
mcd-panel.ko
|
||||||
|
sec_vibrator.ko
|
||||||
|
dropdump.ko
|
||||||
|
common_muic.ko
|
||||||
|
sb-core.ko
|
||||||
|
mesh.ko
|
||||||
|
kperfmon.ko
|
||||||
|
vibrator_vib_info.ko
|
||||||
|
sec_tsp_log.ko
|
||||||
|
sec_tsp_dumpkey.ko
|
||||||
|
sec_common_fn.ko
|
||||||
|
sec_cmd.ko
|
||||||
|
vbus_notifier.ko
|
||||||
|
fingerprint.ko
|
||||||
|
fingerprint_sysfs.ko
|
||||||
|
usb_typec_manager.ko
|
||||||
|
if_cb_manager.ko
|
||||||
|
sm5714-charger.ko
|
||||||
|
pdic_notifier_module.ko
|
||||||
|
abc.ko
|
||||||
|
abc_hub.ko
|
||||||
|
sm5451-charger.ko
|
||||||
|
mfd_sm5714.ko
|
||||||
|
hall_ic.ko
|
||||||
|
hall_ic_notifier.ko
|
||||||
|
sx938x.ko
|
||||||
|
dc_vibrator.ko
|
||||||
|
sm5714_fuelgauge.ko
|
||||||
|
muic_sm5714.ko
|
||||||
|
sec-direct-charger.ko
|
||||||
|
sec_pd.ko
|
||||||
|
sb_wireless.ko
|
||||||
|
sec-battery.ko
|
||||||
|
shub.ko
|
||||||
|
sensor_core.ko
|
||||||
|
stm_ts.ko
|
||||||
|
goodix_ts_berlin.ko
|
||||||
|
pdic_sm5714.ko
|
||||||
|
exynos-bts.ko
|
||||||
|
exynos-btsops8825.ko
|
||||||
|
spu_verify.ko
|
||||||
|
exynos-usb-audio-offloading.ko
|
||||||
|
snd-soc-tfa98xx.ko
|
||||||
|
snd-soc-tfa_sysfs.ko
|
||||||
|
snd-soc-s3c-dma.ko
|
||||||
|
snd-soc-samsung-abox-gic.ko
|
||||||
|
snd-soc-samsung-abox.ko
|
||||||
|
snd-soc-samsung-abox-sync.ko
|
||||||
|
snd-soc-samsung-vts-mailbox.ko
|
||||||
|
snd-soc-samsung-vts.ko
|
||||||
|
snd-soc-samsung-slif.ko
|
||||||
|
exynos8825_audio.ko
|
||||||
|
sec_audio_sysfs.ko
|
||||||
|
sec_audio_debug.ko
|
||||||
|
tcp_westwood.ko
|
||||||
|
tcp_htcp.ko
|
||||||
|
cfg80211.ko
|
||||||
|
8821au.ko
|
||||||
|
mac80211.ko
|
||||||
|
mt76.ko
|
||||||
|
mt76-connac-lib.ko
|
||||||
|
mt76-usb.ko
|
||||||
|
mt7921-common.ko
|
||||||
|
mt7921u.ko
|
||||||
|
input_booster_lkm.ko
|
72
kernel_build/vboot_platform/fstab.s5e8825
Normal file
72
kernel_build/vboot_platform/fstab.s5e8825
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
# Android fstab file.
|
||||||
|
#<src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
|
||||||
|
# The filesystem that contains the filesystem checker binary (typically /system) cannot
|
||||||
|
# specify MF_CHECK, and must come before any filesystems that do specify MF_CHECK
|
||||||
|
|
||||||
|
system /system f2fs ro wait,logical,first_stage_mount
|
||||||
|
system /system ext4 ro wait,logical,first_stage_mount
|
||||||
|
system /system xfs ro wait,logical,first_stage_mount
|
||||||
|
system /system erofs ro wait,logical,first_stage_mount
|
||||||
|
system /system squashfs ro wait,logical,first_stage_mount
|
||||||
|
system /system btrfs ro wait,logical,first_stage_mount,compress-force=zstd:15
|
||||||
|
|
||||||
|
vendor /vendor f2fs ro wait,logical,first_stage_mount
|
||||||
|
vendor /vendor ext4 ro wait,logical,first_stage_mount
|
||||||
|
vendor /vendor xfs ro wait,logical,first_stage_mount
|
||||||
|
vendor /vendor erofs ro wait,logical,first_stage_mount
|
||||||
|
vendor /vendor squashfs ro wait,logical,first_stage_mount
|
||||||
|
vendor /vendor btrfs ro wait,logical,first_stage_mount,compress-force=zstd:15
|
||||||
|
|
||||||
|
vendor_dlkm /vendor_dlkm f2fs ro wait,logical,first_stage_mount
|
||||||
|
vendor_dlkm /vendor_dlkm ext4 ro wait,logical,first_stage_mount
|
||||||
|
vendor_dlkm /vendor_dlkm xfs ro wait,logical,first_stage_mount
|
||||||
|
vendor_dlkm /vendor_dlkm erofs ro wait,logical,first_stage_mount
|
||||||
|
vendor_dlkm /vendor_dlkm squashfs ro wait,logical,first_stage_mount
|
||||||
|
vendor_dlkm /vendor_dlkm btrfs ro wait,logical,first_stage_mount,compress-force=zstd:15
|
||||||
|
|
||||||
|
product /product f2fs ro wait,logical,first_stage_mount
|
||||||
|
product /product ext4 ro wait,logical,first_stage_mount
|
||||||
|
product /product xfs ro wait,logical,first_stage_mount
|
||||||
|
product /product erofs ro wait,logical,first_stage_mount
|
||||||
|
product /product squashfs ro wait,logical,first_stage_mount
|
||||||
|
product /product btrfs ro wait,logical,first_stage_mount,compress-force=zstd:15
|
||||||
|
|
||||||
|
odm /odm f2fs ro wait,logical,first_stage_mount
|
||||||
|
odm /odm ext4 ro wait,logical,first_stage_mount
|
||||||
|
odm /odm xfs ro wait,logical,first_stage_mount
|
||||||
|
odm /odm erofs ro wait,logical,first_stage_mount
|
||||||
|
odm /odm squashfs ro wait,logical,first_stage_mount
|
||||||
|
odm /odm btrfs ro wait,logical,first_stage_mount,compress-force=zstd:15
|
||||||
|
|
||||||
|
/dev/block/by-name/boot /boot emmc defaults first_stage_mount,formattable
|
||||||
|
/dev/block/by-name/vendor_boot /vendor_boot emmc defaults first_stage_mount,formattable
|
||||||
|
/dev/block/by-name/dtbo /dtbo emmc defaults first_stage_mount,formattable
|
||||||
|
/dev/block/by-name/metadata /metadata ext4 noatime,nosuid,nodev,noauto_da_alloc,discard,journal_checksum,data=ordered,errors=panic,sync wait,formattable,first_stage_mount,check
|
||||||
|
/dev/block/by-name/userdata /data f2fs noatime,nosuid,nodev,discard,usrquota,grpquota,fsync_mode=nobarrier,reserve_root=32768,resgid=5678,whint_mode=fs-based,inlinecrypt latemount,wait,check,quota,reservedsize=128M,fileencryption=aes-256-xts:aes-256-cts:v2+inlinecrypt_optimized+wrappedkey_v0,metadata_encryption=aes-256-xts:wrappedkey_v0,checkpoint=fs,fscompress,keydirectory=/metadata/vold/metadata_encryption
|
||||||
|
/dev/block/by-name/userdata /data btrfs noatime,nosuid,nodev,discard,usrquota,grpquota,fsync_mode=nobarrier,reserve_root=32768,resgid=5678,whint_mode=fs-based latemount,wait,check,quota,checkpoint=fs,compress=zstd:1
|
||||||
|
/dev/block/by-name/efs /mnt/vendor/efs ext4 noatime,nosuid,nodev,noauto_da_alloc,discard,journal_checksum,data=ordered,errors=panic wait,check
|
||||||
|
/dev/block/by-name/cpefs /mnt/vendor/cpefs ext4 noatime,nosuid,nodev,noauto_da_alloc,discard,journal_checksum,data=ordered,errors=panic wait,check,nofail
|
||||||
|
/dev/block/by-name/misc /misc emmc defaults defaults,first_stage_mount
|
||||||
|
/dev/block/by-name/recovery /recovery emmc defaults first_stage_mount
|
||||||
|
/dev/block/by-name/cache /cache ext4 noatime,nosuid,nodev,noauto_da_alloc,discard,journal_checksum,data=ordered,errors=panic wait,check
|
||||||
|
/dev/block/by-name/keystorage /keystorage emmc defaults first_stage_mount
|
||||||
|
/dev/block/by-name/harx /harx emmc defaults first_stage_mount
|
||||||
|
|
||||||
|
# CSC
|
||||||
|
/dev/block/by-name/prism /prism f2fs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/prism /prism ext4 ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/prism /prism xfs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/prism /prism erofs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/prism /prism squashfs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/prism /prism btrfs ro,barrier=1 nofail,first_stage_mount,compress-force=zstd:15
|
||||||
|
|
||||||
|
/dev/block/by-name/optics /optics f2fs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/optics /optics ext4 ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/optics /optics xfs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/optics /optics erofs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/optics /optics squashfs ro,barrier=1 nofail,first_stage_mount
|
||||||
|
/dev/block/by-name/optics /optics btrfs ro,barrier=1 nofail,first_stage_mount,compress-force=zstd:15
|
||||||
|
|
||||||
|
# VOLD
|
||||||
|
/devices/platform/13200000.usb* auto vfat default voldmanaged=usb:auto
|
||||||
|
/devices/platform/100e0000.dwmmc2/mmc_host* auto auto default voldmanaged=sdcard:auto
|
BIN
kernel_build/vboot_platform/vendor/firmware/gt9895_a53x.bin
vendored
Normal file
BIN
kernel_build/vboot_platform/vendor/firmware/gt9895_a53x.bin
vendored
Normal file
Binary file not shown.
BIN
kernel_build/zip/META-INF/com/google/android/update-binary
Normal file
BIN
kernel_build/zip/META-INF/com/google/android/update-binary
Normal file
Binary file not shown.
24
kernel_build/zip/META-INF/com/google/android/update-commands
Normal file
24
kernel_build/zip/META-INF/com/google/android/update-commands
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
### SFF ###
|
||||||
|
# Supported commands:
|
||||||
|
# flash_compressed - flashes lz4 (.lz4), zstandard (.zst), lzma2 (.xz) or brotli (.br) (Not the same as .new.dat.br!!!) compressed files
|
||||||
|
# flash_raw - flashes raw files
|
||||||
|
# prop_equals - checks if property equals to value
|
||||||
|
# prop_contains - checks if property contains value
|
||||||
|
# prop_startswith - checks if property starts with value
|
||||||
|
# prop_endswith - checks if property ends with value
|
||||||
|
# ui_print - prints everything after it to the UI
|
||||||
|
# notify_flash - prints '%s - OK' when a file flashing ends. Default: 'no'. Opts: 'yes', 'no'; 'true', 'false'; '1', '0'; 'on', 'off'.
|
||||||
|
# quick_flash - allows up to (nLogicalProcs / 2) parallel flashing threads to be initialized simultaneously, useful with lzma2 (.xz) files.
|
||||||
|
## quick_flash - Warning: don't use other kind of commands (like ui_print) between/after flashing commands
|
||||||
|
## quick_flash - if quick_flash is enabled because they will not be respected and will be done/finish/printed immediately.
|
||||||
|
## quick_flash - Default: 'no'. Opts: 'yes', 'no'; 'true', 'false'; '1', '0'; 'on', 'off'.
|
||||||
|
# exec_bash - executes command in shell
|
||||||
|
# exec_check_bash - executes command in shell and exits if return code is not 0
|
||||||
|
# set_total_bytes - sets total bytes of data expected, should be the sum of all decompressed data (file1 + file2 ...). When set, shows the progress bar.
|
||||||
|
### All lines starting with '#' are treated as comments
|
||||||
|
### Commands & arguments should be separated by ' ', '=' or ':'
|
||||||
|
|
||||||
|
prop_startswith ro.boot.bootloader A536
|
||||||
|
quick_flash yes
|
||||||
|
flash_compressed boot.br /dev/block/by-name/boot
|
||||||
|
flash_compressed vendor_boot.br /dev/block/by-name/vendor_boot
|
Loading…
Reference in a new issue