update_engine-DownloadAction(二)
在update_engine-DownloadAction(一)中对DownloadAction介绍到了DeltaPerformer的Write方法。下面开始介绍Write方法。
src/system/update_engine/payload_consumer/delta_performer.cc
bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
*error = ErrorCode::kSuccess; const char* c_bytes = reinterpret_cast<const char*>(bytes); // Update the total byte downloaded count and the progress logs.
total_bytes_received_ += count;
UpdateOverallProgress(false, "Completed "); //更新进度包括了已经应用的操作数,下载的数据量,以及总的进度 while (!manifest_valid_) { //manifest_valid_的初始值为false
// Read data up to the needed limit; this is either maximium payload header
// size, or the full metadata size (once it becomes known).
const bool do_read_header = !IsHeaderParsed(); //是否解析过Header
CopyDataToBuffer(&c_bytes, &count, //将数据拷贝到缓存区server中
(do_read_header ? kMaxPayloadHeaderSize :
metadata_size_ + metadata_signature_size_)); MetadataParseResult result = ParsePayloadMetadata(buffer_, error); //解析元数据
if (result == kMetadataParseError)
return false;
if (result == kMetadataParseInsufficientData) {
// If we just processed the header, make an attempt on the manifest.
if (do_read_header && IsHeaderParsed())
continue; return true;
} // Checks the integrity of the payload manifest.
if ((*error = ValidateManifest()) != ErrorCode::kSuccess) //验证Manifest
return false;
manifest_valid_ = true; // Clear the download buffer.
DiscardBuffer(false, metadata_size_); //清除缓存区 // This populates |partitions_| and the |install_plan.partitions| with the
// list of partitions from the manifest.
if (!ParseManifestPartitions(error)) //解析Manifest中的Partitions的信息
return false; // |install_plan.partitions| was filled in, nothing need to be done here if
// the payload was already applied, returns false to terminate http fetcher,
// but keep |error| as ErrorCode::kSuccess.
if (payload_->already_applied) //检查当前payload_是否已经被应用
return false; num_total_operations_ = ;
for (const auto& partition : partitions_) {
num_total_operations_ += partition.operations_size(); //计算总的操作数
acc_num_operations_.push_back(num_total_operations_); //将每次计算的操作数放入到集合中,这样做的意义有能够根据操作数来判断是哪个
} //分区要进行操作,以及是该分区的第几个操作 LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize,
metadata_size_))
<< "Unable to save the manifest metadata size.";
LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestSignatureSize,
metadata_signature_size_))
<< "Unable to save the manifest signature size."; if (!PrimeUpdateState()) { /更新主要的状态,包含了block_size_,next_operation_num等
*error = ErrorCode::kDownloadStateInitializationError;
LOG(ERROR) << "Unable to prime the update state.";
return false;
} if (!OpenCurrentPartition()) { //打开当前的分区,包括source_slot和target_slot的,为升级做准备
*error = ErrorCode::kInstallDeviceOpenError;
return false;
} if (next_operation_num_ > )
UpdateOverallProgress(true, "Resuming after ");
LOG(INFO) << "Starting to apply update payload operations";
} while (next_operation_num_ < num_total_operations_) { //开始进行更新
// Check if we should cancel the current attempt for any reason.
// In this case, *error will have already been populated with the reason
// why we're canceling.
if (download_delegate_ && download_delegate_->ShouldCancel(error)) //目前什么都没做,直接返回了false
return false; // We know there are more operations to perform because we didn't reach the
// |num_total_operations_| limit yet.
while (next_operation_num_ >= acc_num_operations_[current_partition_]) { //说明了当前分区已经更新完成,需要更新下一个分区
CloseCurrentPartition(); //关闭当前分区
current_partition_++; //切换到下一个分区
if (!OpenCurrentPartition()) { //打开
*error = ErrorCode::kInstallDeviceOpenError;
return false;
}
}
const size_t partition_operation_num = next_operation_num_ - (
current_partition_ ? acc_num_operations_[current_partition_ - ] : ); //计算出当前分区将要应用的操作数 const InstallOperation& op =
partitions_[current_partition_].operations(partition_operation_num); //获取到操作的类型 CopyDataToBuffer(&c_bytes, &count, op.data_length()); //将该操作对应的数据放到缓存区中 // Check whether we received all of the next operation's data payload.
if (!CanPerformInstallOperation(op)) //验证该操作是否能够进行,主要就是看该操作对应的数据是否已经全部都下载完了
return true; // Validate the operation only if the metadata signature is present.
// Otherwise, keep the old behavior. This serves as a knob to disable
// the validation logic in case we find some regression after rollout.
// NOTE: If hash checks are mandatory and if metadata_signature is empty,
// we would have already failed in ParsePayloadMetadata method and thus not
// even be here. So no need to handle that case again here.
if (!payload_->metadata_signature.empty()) {
// Note: Validate must be called only if CanPerformInstallOperation is
// called. Otherwise, we might be failing operations before even if there
// isn't sufficient data to compute the proper hash.
*error = ValidateOperationHash(op); //校验操作对应数据的hash值是否正确
if (*error != ErrorCode::kSuccess) {
if (install_plan_->hash_checks_mandatory) {
LOG(ERROR) << "Mandatory operation hash check failed";
return false;
} // For non-mandatory cases, just send a UMA stat.
LOG(WARNING) << "Ignoring operation validation errors";
*error = ErrorCode::kSuccess;
}
} // Makes sure we unblock exit when this operation completes.
ScopedTerminatorExitUnblocker exit_unblocker =
ScopedTerminatorExitUnblocker(); // Avoids a compiler unused var bug. bool op_result;
switch (op.type()) { //根据操作的类型执行对应的操作
case InstallOperation::REPLACE:
case InstallOperation::REPLACE_BZ:
case InstallOperation::REPLACE_XZ:
op_result = PerformReplaceOperation(op);
break;
case InstallOperation::ZERO:
case InstallOperation::DISCARD:
op_result = PerformZeroOrDiscardOperation(op);
break;
case InstallOperation::MOVE:
op_result = PerformMoveOperation(op);
break;
case InstallOperation::BSDIFF:
op_result = PerformBsdiffOperation(op);
break;
case InstallOperation::SOURCE_COPY:
op_result = PerformSourceCopyOperation(op, error);
break;
case InstallOperation::SOURCE_BSDIFF:
op_result = PerformSourceBsdiffOperation(op, error);
break;
case InstallOperation::IMGDIFF:
// TODO(deymo): Replace with PUFFIN operation.
op_result = false;
break;
default:
op_result = false;
}
if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error)) //对处理结果进行打印
return false; next_operation_num_++;
UpdateOverallProgress(false, "Completed ");
CheckpointUpdateProgress(); //保存更新进度,类似于断点能够进行保存
} // In major version 2, we don't add dummy operation to the payload.
// If we already extracted the signature we should skip this step. if (major_payload_version_ == kBrilloMajorPayloadVersion &&
manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
signatures_message_data_.empty()) {
if (manifest_.signatures_offset() != buffer_offset_) {
LOG(ERROR) << "Payload signatures offset points to blob offset "
<< manifest_.signatures_offset()
<< " but signatures are expected at offset "
<< buffer_offset_;
*error = ErrorCode::kDownloadPayloadVerificationError;
return false;
}
CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
// Needs more data to cover entire signature.
if (buffer_.size() < manifest_.signatures_size())
return true;
if (!ExtractSignatureMessage()) { //获取升级文件中数据区域的签名
LOG(ERROR) << "Extract payload signature failed.";
*error = ErrorCode::kDownloadPayloadVerificationError;
return false;
}
DiscardBuffer(true, );
// Since we extracted the SignatureMessage we need to advance the
// checkpoint, otherwise we would reload the signature and try to extract
// it again.
CheckpointUpdateProgress();
} return true;
}
这个方法乍一看上去内容特别的多,而且如果对升级文件没有一个了解的情况下分析这段代码会有一点点困难,但是当跨过这个困难的时候就会对升级文件的结构有一个了解。要想了解升级文件的结构可以直接分析升级文件,但是在android引入A/B升级之后,升级文件是纯二进制的文件,而且还被加了密。分析起来难度也比较大,当然我们也可以分析代码中是如何解析的,根据解析我们就能够获取到升级文件的结构。另外在A/B升级中,它应用更新的流程就是下载->解析->验证->应用。这里的下载指的就是将数据加载到内存中,并且是边下载边更新,更新完之后就会把数据从内存中移除。下面是分析代码所得到升级文件的结构。
升级文件的结构
magic:是用于校验数据在内存中的地址偏移量是否正确。假设我们预期从地址0到3存放A,B,C,D可是当计算存在问题时,应该得到0的时候我们得到的是1,那么就会在1到4存放A,B,C,D,而我们再从0开始访问就会有问题。如果在内存的开始部分加入一个magic,当我们从0开始访问的时候,就先根据定义好的magic判断数据在内存中是否发生偏移错误。也就存放数据的时候我们存放magic,A,B,C,D正确的结果是0到4,但是却放到了1到5,这个时候我们依然去用0开始访问,但是我们首先会检验在0上的magic和预期的一样,如果一样则说明没有发生偏移错误,可以继续访问,如果不一样则说明偏移错误,之后可以进行相应的处理。
delta version:是差分版本,也就是update_engine的版本号
manifest_size: 代表manifest的大小,manifest意为清单文件,系统如何升级也是根据manifest来做的。
metadata_signature_size:代表了元数据签名的大小。可以将magic,delta version,manifest_size,metadata_signature_size以及manifest[]称为元数据。
manifest[]: 主要的清单文件,记录了各个分区的更新的操作,以及数据信息等
metadata_signaturesize_message : 元数据的签名,而且也是经过加密的
data: 用于更新的数据
data_messgage:为data的签名信息。在kBrilloMajorPayloadVersion 这个版本中才会有。在A/B更新出现后,一共出现了两个版本一个kChromeOSMajorPayloadVersion一个kBrilloMajorPayloadVersion,kBrilloMajorPayloadVersion这个版本为新版本,也是Android8.0中使用的。
当有了这些了解后再来分析Write方法是就会简单很多。在Write中主要做的事情为:
1. ParsePayloadMetadata解析元数据。来看一下是如何解析的。
// Parses and validates the payload metadata: magic string, major payload
// version, manifest size, metadata signature size (major version 2), the
// metadata signature itself, and finally the manifest protobuf. Returns
// kMetadataParseInsufficientData when more payload bytes are needed.
DeltaPerformer::MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
    const brillo::Blob& payload, ErrorCode* error) {
  *error = ErrorCode::kSuccess;
  uint64_t manifest_offset;

  if (!IsHeaderParsed()) {  // Header not parsed yet.
    // Ensure we have data to cover the major payload version.
    // kDeltaManifestSizeOffset = kDeltaVersionOffset + kDeltaVersionSize.
    if (payload.size() < kDeltaManifestSizeOffset)
      return kMetadataParseInsufficientData;  // magic + version not loaded yet

    // Validate the magic string.
    if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
      LOG(ERROR) << "Bad payload format -- invalid delta magic.";
      *error = ErrorCode::kDownloadInvalidMetadataMagicString;
      return kMetadataParseError;
    }

    // Extract the payload version from the metadata.
    static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
                  "Major payload version size mismatch");
    memcpy(&major_payload_version_,
           &payload[kDeltaVersionOffset],
           kDeltaVersionSize);
    // Switch big endian to host byte order.
    major_payload_version_ = be64toh(major_payload_version_);

    // Check that the major version is one we support.
    if (major_payload_version_ != supported_major_version_ &&
        major_payload_version_ != kChromeOSMajorPayloadVersion) {
      LOG(ERROR) << "Bad payload format -- unsupported payload version: "
                 << major_payload_version_;
      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
      return kMetadataParseError;
    }

    // Get the manifest offset now that we have payload version.
    if (!GetManifestOffset(&manifest_offset)) {
      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
      return kMetadataParseError;
    }
    // Check again with the manifest offset: everything preceding the manifest
    // must already be in memory.
    if (payload.size() < manifest_offset)
      return kMetadataParseInsufficientData;

    // Next, parse the manifest size.
    static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
                  "manifest_size size mismatch");
    memcpy(&manifest_size_,
           &payload[kDeltaManifestSizeOffset],
           kDeltaManifestSizeSize);
    manifest_size_ = be64toh(manifest_size_);  // big endian -> host order

    if (GetMajorVersion() == kBrilloMajorPayloadVersion) {  // new format only
      // Parse the metadata signature size.
      static_assert(sizeof(metadata_signature_size_) ==
                    kDeltaMetadataSignatureSizeSize,
                    "metadata_signature_size size mismatch");
      uint64_t metadata_signature_size_offset;
      if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
        *error = ErrorCode::kError;
        return kMetadataParseError;
      }
      memcpy(&metadata_signature_size_,
             &payload[metadata_signature_size_offset],
             kDeltaMetadataSignatureSizeSize);
      metadata_signature_size_ = be32toh(metadata_signature_size_);
    }

    // If the metadata size is present in install plan, check for it immediately
    // even before waiting for that many number of bytes to be downloaded in the
    // payload. This will prevent any attack which relies on us downloading data
    // beyond the expected metadata size.
    metadata_size_ = manifest_offset + manifest_size_;
    if (install_plan_->hash_checks_mandatory) {
      if (payload_->metadata_size != metadata_size_) {
        LOG(ERROR) << "Mandatory metadata size in Omaha response ("
                   << payload_->metadata_size
                   << ") is missing/incorrect, actual = " << metadata_size_;
        *error = ErrorCode::kDownloadInvalidMetadataSize;
        return kMetadataParseError;
      }
    }
  }

  // Now that we have validated the metadata size, we should wait for the full
  // metadata and its signature (if exist) to be read in before we can parse it.
  if (payload.size() < metadata_size_ + metadata_signature_size_)
    return kMetadataParseInsufficientData;

  // Log whether we validated the size or simply trusting what's in the payload
  // here. This is logged here (after we received the full metadata data) so
  // that we just log once (instead of logging n times) if it takes n
  // DeltaPerformer::Write calls to download the full manifest.
  if (payload_->metadata_size == metadata_size_) {
    LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
  } else {
    // For mandatory-cases, we'd have already returned a kMetadataParseError
    // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
    LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
                 << payload_->metadata_size
                 << ") in Omaha response as validation is not mandatory. "
                 << "Trusting metadata size in payload = " << metadata_size_;
  }

  // We have the full metadata in |payload|. Verify its integrity
  // and authenticity based on the information we have in Omaha response.
  *error = ValidateMetadataSignature(payload);
  if (*error != ErrorCode::kSuccess) {
    if (install_plan_->hash_checks_mandatory) {
      // The autoupdate_CatchBadSignatures test checks for this string
      // in log-files. Keep in sync.
      LOG(ERROR) << "Mandatory metadata signature validation failed";
      return kMetadataParseError;
    }
    // For non-mandatory cases, just send a UMA stat.
    LOG(WARNING) << "Ignoring metadata signature validation failures";
    *error = ErrorCode::kSuccess;
  }

  if (!GetManifestOffset(&manifest_offset)) {
    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
    return kMetadataParseError;
  }
  // The payload metadata is deemed valid, it's safe to parse the protobuf.
  if (!manifest_.ParseFromArray(&payload[manifest_offset], manifest_size_)) {
    LOG(ERROR) << "Unable to parse manifest in update file.";
    *error = ErrorCode::kDownloadManifestParseError;
    return kMetadataParseError;
  }

  manifest_parsed_ = true;
  return kMetadataParseSuccess;
}
可以看到整个解析的过程也比较简单了。接下来着重看一下ValidateMetadataSignature的实现
// Verifies the metadata signature, either against the base64-encoded
// signature carried in the Omaha response (payload_->metadata_signature) or
// against the signature blob embedded in the payload (major version 2).
ErrorCode DeltaPerformer::ValidateMetadataSignature(
    const brillo::Blob& payload) {
  // The full metadata and its signature must be downloaded first.
  if (payload.size() < metadata_size_ + metadata_signature_size_)
    return ErrorCode::kDownloadMetadataSignatureError;

  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
  if (!payload_->metadata_signature.empty()) {
    // The Omaha response supplied the signature.
    // Convert base64-encoded signature to raw bytes.
    if (!brillo::data_encoding::Base64Decode(payload_->metadata_signature,
                                             &metadata_signature_blob)) {
      LOG(ERROR) << "Unable to decode base64 metadata signature: "
                 << payload_->metadata_signature;
      return ErrorCode::kDownloadMetadataSignatureError;
    }
  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
    // No signature from Omaha; read it out of the downloaded payload.
    metadata_signature_protobuf_blob.assign(
        payload.begin() + metadata_size_,
        payload.begin() + metadata_size_ + metadata_signature_size_);
  }

  if (metadata_signature_blob.empty() &&
      metadata_signature_protobuf_blob.empty()) {
    // No metadata signature available from either source.
    if (install_plan_->hash_checks_mandatory) {
      LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
                 << "response and payload.";
      return ErrorCode::kDownloadMetadataSignatureMissingError;
    }
    LOG(WARNING) << "Cannot validate metadata as the signature is empty";
    return ErrorCode::kSuccess;
  }

  // See if we should use the public RSA key in the Omaha response.
  base::FilePath path_to_public_key(public_key_path_);
  base::FilePath tmp_key;
  if (GetPublicKeyFromResponse(&tmp_key))  // install_plan_ may carry a key.
    path_to_public_key = tmp_key;
  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
  if (tmp_key.empty())
    tmp_key_remover.set_should_remove(false);

  LOG(INFO) << "Verifying metadata hash signature using public key: "
            << path_to_public_key.value();

  // Hash the metadata bytes we actually downloaded.
  brillo::Blob calculated_metadata_hash;
  if (!HashCalculator::RawHashOfBytes(
          payload.data(), metadata_size_, &calculated_metadata_hash)) {
    LOG(ERROR) << "Unable to compute actual hash of manifest";
    return ErrorCode::kDownloadMetadataSignatureVerificationError;
  }

  // Pad the hash for RSA-2048/SHA-256 comparison.
  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
  if (calculated_metadata_hash.empty()) {
    LOG(ERROR) << "Computed actual hash of metadata is empty.";
    return ErrorCode::kDownloadMetadataSignatureVerificationError;
  }

  if (!metadata_signature_blob.empty()) {
    // Omaha-supplied signature: recover the expected hash with the public key
    // and compare it against the hash we computed.
    brillo::Blob expected_metadata_hash;
    if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
                                                  path_to_public_key.value(),
                                                  &expected_metadata_hash)) {
      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
      return ErrorCode::kDownloadMetadataSignatureError;
    }
    if (calculated_metadata_hash != expected_metadata_hash) {
      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
      utils::HexDumpVector(expected_metadata_hash);
      LOG(ERROR) << "Calculated hash = ";
      utils::HexDumpVector(calculated_metadata_hash);
      return ErrorCode::kDownloadMetadataSignatureMismatch;
    }
  } else {
    // Payload-embedded signature: verify it directly against our hash.
    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
                                          path_to_public_key.value(),
                                          calculated_metadata_hash)) {
      LOG(ERROR) << "Manifest hash verification failed.";
      return ErrorCode::kDownloadMetadataSignatureMismatch;
    }
  }

  // The autoupdate_CatchBadSignatures test checks for this string in
  // log-files. Keep in sync.
  LOG(INFO) << "Metadata hash signature matches value in Omaha response.";
  return ErrorCode::kSuccess;
}
这个方法主要说明了metadata_signature签名的验证机制,其中有一个payload_,是在DeltaPerformer构造函数中赋的值。接下来在分析manifest_.ParseFromArray(&payload[manifest_offset], manifest_size_),在初看到这行代码的时候,花了很长时间也没有找到ParseFromArray的实现。DeltaArchiveManifest类也没有找到对应的C++类,但是却找到了update_metadata_pb2.py和update_metadata.proto。update_metadata.proto的内容如下
src/system/update_engine/update_metadata.proto
// Payload manifest schema (src/system/update_engine/update_metadata.proto).
// Field numbers and defaults restored from the AOSP update_engine sources;
// the scraped copy had stripped every "= N" tag.

// A sequence of consecutive blocks on a partition.
message Extent {
  optional uint64 start_block = 1;
  optional uint64 num_blocks = 2;
}

message Signatures {
  message Signature {
    optional uint32 version = 1;
    optional bytes data = 2;
  }
  repeated Signature signatures = 1;
}

message PartitionInfo {
  optional uint64 size = 1;
  optional bytes hash = 2;
}

// Describe an image we are based on in a human friendly way.
// Examples:
//   dev-channel, x86-alex, 1.2.3, mp-v3
//   nplusone-channel, x86-alex, 1.2.4, mp-v3, dev-channel, 1.2.3
//
// All fields will be set, if this message is present.
message ImageInfo {
  optional string board = 1;
  optional string key = 2;
  optional string channel = 3;
  optional string version = 4;

  // If these values aren't present, they should be assumed to match
  // the equivalent value above. They are normally only different for
  // special image types such as nplusone images.
  optional string build_channel = 5;
  optional string build_version = 6;
}

message InstallOperation {
  enum Type {
    REPLACE = 0;     // Replace destination extents w/ attached data
    REPLACE_BZ = 1;  // Replace destination extents w/ attached bzipped data
    MOVE = 2;        // Move source extents to destination extents
    BSDIFF = 3;      // The data is a bsdiff binary diff

    // On minor version 2 or newer, these operations are supported:
    SOURCE_COPY = 4;    // Copy from source to target partition
    SOURCE_BSDIFF = 5;  // Like BSDIFF, but read from source partition

    // On minor version 3 or newer and on major version 2 or newer, these
    // operations are supported:
    ZERO = 6;        // Write zeros in the destination.
    DISCARD = 7;     // Discard the destination blocks, reading as undefined.
    REPLACE_XZ = 8;  // Replace destination extents w/ attached xz data.

    // On minor version 4 or newer, these operations are supported:
    IMGDIFF = 9;  // The data is in imgdiff format.
  }
  required Type type = 1;
  // The offset into the delta file (after the protobuf)
  // where the data (if any) is stored
  optional uint32 data_offset = 2;
  // The length of the data in the delta file
  optional uint32 data_length = 3;

  // Ordered list of extents that are read from (if any) and written to.
  repeated Extent src_extents = 4;
  // Byte length of src, equal to the number of blocks in src_extents *
  // block_size. It is used for BSDIFF, because we need to pass that
  // external program the number of bytes to read from the blocks we pass it.
  // This is not used in any other operation.
  optional uint64 src_length = 5;

  repeated Extent dst_extents = 6;
  // Byte length of dst, equal to the number of blocks in dst_extents *
  // block_size. Used for BSDIFF, but not in any other operation.
  optional uint64 dst_length = 7;

  // Optional SHA 256 hash of the blob associated with this operation.
  // This is used as a primary validation for http-based downloads and
  // as a defense-in-depth validation for https-based downloads. If
  // the operation doesn't refer to any blob, this field will have
  // zero bytes.
  optional bytes data_sha256_hash = 8;

  // Indicates the SHA 256 hash of the source data referenced in src_extents at
  // the time of applying the operation. If present, the update_engine daemon
  // MUST read and verify the source data before applying the operation.
  optional bytes src_sha256_hash = 9;
}

// Describes the update to apply to a single partition.
message PartitionUpdate {
  // A platform-specific name to identify the partition set being updated. For
  // example, in Chrome OS this could be "ROOT" or "KERNEL".
  required string partition_name = 1;

  // Whether this partition carries a filesystem with post-install program that
  // must be run to finalize the update process. See also |postinstall_path| and
  // |filesystem_type|.
  optional bool run_postinstall = 2;

  // The path of the executable program to run during the post-install step,
  // relative to the root of this filesystem. If not set, the default "postinst"
  // will be used. This setting is only used when |run_postinstall| is set and
  // true.
  optional string postinstall_path = 3;

  // The filesystem type as passed to the mount(2) syscall when mounting the new
  // filesystem to run the post-install program. If not set, a fixed list of
  // filesystems will be attempted. This setting is only used if
  // |run_postinstall| is set and true.
  optional string filesystem_type = 4;

  // If present, a list of signatures of the new_partition_info.hash signed with
  // different keys. If the update_engine daemon requires vendor-signed images
  // and has its public key installed, one of the signatures should be valid
  // for /postinstall to run.
  repeated Signatures.Signature new_partition_signature = 5;

  optional PartitionInfo old_partition_info = 6;
  optional PartitionInfo new_partition_info = 7;

  // The list of operations to be performed to apply this PartitionUpdate. The
  // associated operation blobs (in operations[i].data_offset, data_length)
  // should be stored contiguously and in the same order.
  repeated InstallOperation operations = 8;

  // Whether a failure in the postinstall step for this partition should be
  // ignored.
  optional bool postinstall_optional = 9;
}

message DeltaArchiveManifest {
  // Only present in major version = 1. List of install operations for the
  // kernel and rootfs partitions. For major version = 2 see the |partitions|
  // field.
  repeated InstallOperation install_operations = 1;
  repeated InstallOperation kernel_install_operations = 2;

  // (At time of writing) usually 4096
  optional uint32 block_size = 3 [default = 4096];

  // If signatures are present, the offset into the blobs, generally
  // tacked onto the end of the file, and the length. We use an offset
  // rather than a bool to allow for more flexibility in future file formats.
  // If either is absent, it means signatures aren't supported in this
  // file.
  optional uint64 signatures_offset = 4;
  optional uint64 signatures_size = 5;

  // Only present in major version = 1. Partition metadata used to validate the
  // update. For major version = 2 see the |partitions| field.
  optional PartitionInfo old_kernel_info = 6;
  optional PartitionInfo new_kernel_info = 7;
  optional PartitionInfo old_rootfs_info = 8;
  optional PartitionInfo new_rootfs_info = 9;

  // old_image_info will only be present for delta images.
  optional ImageInfo old_image_info = 10;
  optional ImageInfo new_image_info = 11;

  // The minor version, also referred as "delta version", of the payload.
  optional uint32 minor_version = 12 [default = 0];

  // Only present in major version >= 2. List of partitions that will be
  // updated, in the order they will be updated. This field replaces the
  // |install_operations|, |kernel_install_operations| and the
  // |{old,new}_{kernel,rootfs}_info| fields used in major version = 1. This
  // array can have more than two partitions if needed, and they are identified
  // by the partition name.
  repeated PartitionUpdate partitions = 13;

  // The maximum timestamp of the OS allowed to apply this payload.
  // Can be used to prevent downgrading the OS.
  optional int64 max_timestamp = 14;
}
可以看出它应该就是由update_metadata_pb2.py这个脚本解析的manifest的数据格式。后来了解到这是Protobuf数据格式,是比xml和json更加高效的数据格式,采用了二进制的存储。那么其实根据DeltaArchiveManifest我们就能大体推断出Manifest中所包含的数据类型。主要就是安装更新操作的类型,数据的签名,新旧内核,rootfs,ImageInfo,分区更新等。到此ParsePayloadMetadata这个方法就算是分析完了。回到Write中继续分析,当解析完成了Manifest之后,就调用了ValidateManifest()来验证manifest。
2.ValidateManifest()来验证manifest
// Sanity-checks the parsed manifest: detects the payload type (full vs
// delta), verifies minor-version compatibility, rejects deprecated
// major-version-1 fields in newer payloads, and enforces the anti-downgrade
// timestamp.
ErrorCode DeltaPerformer::ValidateManifest() {
  // Perform assorted checks to sanity check the manifest, make sure it
  // matches data from other sources, and that it is a supported version.

  bool has_old_fields =
      (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
  for (const PartitionUpdate& partition : manifest_.partitions()) {
    has_old_fields = has_old_fields || partition.has_old_partition_info();
  }

  // The presence of an old partition hash is the sole indicator for a delta
  // update.
  InstallPayloadType actual_payload_type =
      has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;

  if (payload_->type == InstallPayloadType::kUnknown) {
    // payload_->type defaults to kUnknown; record the detected type.
    LOG(INFO) << "Detected a '"
              << InstallPayloadTypeToString(actual_payload_type)
              << "' payload.";
    payload_->type = actual_payload_type;
  } else if (payload_->type != actual_payload_type) {
    LOG(ERROR) << "InstallPlan expected a '"
               << InstallPayloadTypeToString(payload_->type)
               << "' payload but the downloaded manifest contains a '"
               << InstallPayloadTypeToString(actual_payload_type)
               << "' payload.";
    return ErrorCode::kPayloadMismatchedType;
  }

  // Check that the minor version is compatible.
  if (actual_payload_type == InstallPayloadType::kFull) {
    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", but all full payloads should have version "
                 << kFullPayloadMinorVersion << ".";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  } else {
    if (manifest_.minor_version() != supported_minor_version_) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << " not the supported "
                 << supported_minor_version_;
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  }

  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
    // These fields are only valid in kChromeOSMajorPayloadVersion (v1).
    if (manifest_.has_old_rootfs_info() ||
        manifest_.has_new_rootfs_info() ||
        manifest_.has_old_kernel_info() ||
        manifest_.has_new_kernel_info() ||
        manifest_.install_operations_size() != 0 ||
        manifest_.kernel_install_operations_size() != 0) {
      LOG(ERROR) << "Manifest contains deprecated field only supported in "
                 << "major payload version 1, but the payload major version is "
                 << major_payload_version_;
      return ErrorCode::kPayloadMismatchedType;
    }
  }

  // Anti-downgrade: refuse a payload older than the currently running build.
  if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
    LOG(ERROR) << "The current OS build timestamp ("
               << hardware_->GetBuildTimestamp()
               << ") is newer than the maximum timestamp in the manifest ("
               << manifest_.max_timestamp() << ")";
    return ErrorCode::kPayloadTimestampError;
  }

  // TODO(garnold) we should be adding more and more manifest checks, such as
  // partition boundaries etc (see chromium-os:37661).

  return ErrorCode::kSuccess;
}
这个方法主要验证了升级的类型,以及升级程序版本的正确性,最后对时间戳进行了一次校验,理论上升级包中新版本的时间戳应该比系统中当前版本的时间戳更新一些,才允许升级。对manifest进行了校验之后,在Write方法中标记manifest_valid_为true,清空缓存区后,开始对分区信息进行解析。
3.解析Manifest中的Partitions的信息
// Moves the per-partition update information out of |manifest_| into
// |partitions_|, then fills install_plan_->partitions with name, postinstall
// settings, and the old/new partition size+hash, and resolves each
// partition's device path from its name and slot.
bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
    partitions_.clear();
    for (const PartitionUpdate& partition : manifest_.partitions()) {
      partitions_.push_back(partition);  // Keep a local copy of each entry.
    }
    manifest_.clear_partitions();  // Drop the partition info from manifest_.
  } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
    LOG(INFO) << "Converting update information from old format.";
    // Legacy (major version 1) conversion path; not analyzed here.
  }

  // Fill in the InstallPlan::partitions based on the partitions from the
  // payload.
  for (const auto& partition : partitions_) {
    InstallPlan::Partition install_part;
    install_part.name = partition.partition_name();  // Partition name.
    install_part.run_postinstall =
        partition.has_run_postinstall() && partition.run_postinstall();
    if (install_part.run_postinstall) {
      install_part.postinstall_path =
          (partition.has_postinstall_path() ? partition.postinstall_path()
                                            : kPostinstallDefaultScript);
      install_part.filesystem_type = partition.filesystem_type();
      install_part.postinstall_optional = partition.postinstall_optional();
    }

    if (partition.has_old_partition_info()) {
      // Source (old) partition info — only present for delta payloads.
      const PartitionInfo& info = partition.old_partition_info();
      install_part.source_size = info.size();
      install_part.source_hash.assign(info.hash().begin(), info.hash().end());
    }

    if (!partition.has_new_partition_info()) {
      LOG(ERROR) << "Unable to get new partition hash info on partition "
                 << install_part.name << ".";
      *error = ErrorCode::kDownloadNewPartitionInfoError;
      return false;
    }
    // Target (new) partition info.
    const PartitionInfo& info = partition.new_partition_info();
    install_part.target_size = info.size();
    install_part.target_hash.assign(info.hash().begin(), info.hash().end());

    install_plan_->partitions.push_back(install_part);  // Save to install_plan_.
  }

  // Resolve each partition's device path from its name and slot.
  if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
    LOG(ERROR) << "Unable to determine all the partition devices.";
    *error = ErrorCode::kInstallDeviceOpenError;
    return false;
  }
  LogPartitionInfo(partitions_);  // Log the partition details.
  return true;
}
其实解析分区主要就是将分区信息从manifest_中转移到install_plan_。在Write中最后做的就是获取操作数,获取操作类型,根据操作类型执行对应的操作,验证payload中数据的签名。其中需要注意的是关于操作数的计算和更新数据的校验。
4.关于操作数的计算,可以看下面相关的部分
// Accumulate the operation counts across all partitions.
// acc_num_operations_[i] holds the running total up to and including
// partition i, so a global operation index can be mapped back to the
// partition it belongs to.
num_total_operations_ = 0;
for (const auto& partition : partitions_) {
  num_total_operations_ += partition.operations_size();
  acc_num_operations_.push_back(num_total_operations_);
}

// Once the global index reaches the current partition's accumulated count,
// that partition is fully applied: close it and open the next one.
while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
  CloseCurrentPartition();
  current_partition_++;
  if (!OpenCurrentPartition()) {
    *error = ErrorCode::kInstallDeviceOpenError;
    return false;
  }
}

// Index of the next operation within the current partition: subtract the
// accumulated count of all preceding partitions (0 for the first one).
const size_t partition_operation_num = next_operation_num_ - (
    current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
假设有分区A,B,C对应的操作数为2,4,6。那么num_total_operations_ =12,acc_num_operations_中存放的元素为2,6,12,此时执行到了第2个操作,next_operation_num_ =2,而2是等于acc_num_operations_[0]的,而存放操作的数组是从0开始的,也就是说,当next_operation_num_大于等于acc_num_operations_[current_partition_]时就说明当前分区的操作已经执行完了,应该切换到下一个分区,最后根据next_operation_num_和acc_num_operations_计算出操作在当前分区中的索引,获取对应的操作。最后对于更新数据的校验是指每当应用所下载的数据的时候,都会对其进行校验,首先是保存了数据的hash值,之后再根据所下载的数据计算一个hash值,进行比对,验证数据是否正确。
到这里DownloadAction的核心部分已经分析完成,下面一篇文章会分析FilesystemVerifierAction,PostinstallRunnerAction。
update_engine-DownloadAction(二)的更多相关文章
- strut2-学习笔记(二)
Struts2学习笔记(二) 1. 自定义结果视图的类型(结果视图类型的应用) CAPTCHA图像(随机验证码图像) 实现步骤: (1)编写一个类实现com.opensymphony.xwork ...
- update_engine-整体结构(二)
在update_engine-整体结构(一)中分析UpdateEngineDaemon::OnInit()的整体情况.下面先分析在该方法中涉及的DaemonStateAndroid和BinderUpd ...
- java struts2入门学习---文件下载的二种方式
一.关于文件下载: 文件下载的核心思想即是将文件从一个地方拷贝到另一个地方. 1.传统方式: 在Action中加入大量servlet api 操作.优点是好理解,缺点是耦合度高. 2.stream方式 ...
- JAVA生成解析二维码
package com.mohe.twocode; import java.awt.Color; import java.awt.Graphics2D; import java.awt.image.B ...
- 【小程序分享篇 二 】web在线踢人小程序,维持用户只能在一个台电脑持登录状态
最近离职了, 突然记起来还一个小功能没做, 想想也挺简单,留下代码和思路给同事做个参考. 换工作心里挺忐忑, 对未来也充满了憧憬与担忧.(虽然已是老人, 换了N次工作了,但每次心里都和忐忑). 写写代 ...
- 前端开发中SEO的十二条总结
一. 合理使用title, description, keywords二. 合理使用h1 - h6, h1标签的权重很高, 注意使用频率三. 列表代码使用ul, 重要文字使用strong标签四. 图片 ...
- 【疯狂造轮子-iOS】JSON转Model系列之二
[疯狂造轮子-iOS]JSON转Model系列之二 本文转载请注明出处 —— polobymulberry-博客园 1. 前言 上一篇<[疯狂造轮子-iOS]JSON转Model系列之一> ...
- 【原】Android热更新开源项目Tinker源码解析系列之二:资源文件热更新
上一篇文章介绍了Dex文件的热更新流程,本文将会分析Tinker中对资源文件的热更新流程. 同Dex,资源文件的热更新同样包括三个部分:资源补丁生成,资源补丁合成及资源补丁加载. 本系列将从以下三个方 ...
- 谈谈一些有趣的CSS题目(十二)-- 你该知道的字体 font-family
开本系列,谈谈一些有趣的 CSS 题目,题目类型天马行空,想到什么说什么,不仅为了拓宽一下解决问题的思路,更涉及一些容易忽视的 CSS 细节. 解题不考虑兼容性,题目天马行空,想到什么说什么,如果解题 ...
随机推荐
- C#中异步使用及回调
1. 一句话理解异步 我叫你去吃饭,叫完你不去,那我就会一直等你,直到你和我一起去吃饭.这叫同步! 我叫你去吃饭,叫完不管你去不去,我都不会等你,我自己去吃饭.这叫异步! 2. 异步使用 static ...
- oracle中创建数据库
一.在Oracle中创建数据库之前先改一下虚拟机的IP地址,以便访问 2. 3. 3.1 3.2 3.3 3.4 创建完成:输入sqlplus sys/123456 as sysdba测试
- win8 tiles风格标签插件jquery.wordbox.js
http://www.html580.com/12180 jquery.wordbox.js轻松实现win8瓦片tiles式风格标签插件,只需要调用JS就能轻松实现瓦片菜单,自定义菜单背景颜色,支持响 ...
- caog
import pandas as pd#匹配可发库存1. import oslst=os.listdir(r'E:\每日必做\琪琪小象库存')lst1=[]for i in lst: if i[:2] ...
- Android测试中常用的adb命令
进入root权限adb root adb remount 重启手机 adb reboot 查看手机devices版本(adb是否连接手机) adb devices 点亮手机电源键/菜单键/home键 ...
- 在vue.js 中使用animate.css库
main.js文件引入后,在vue文件里直接添加class animated bounceInUp
- C语言链表:逆序建立单链表
#define _CRT_SECURE_NO_WARNINGS #include<stdio.h> #include<malloc.h> #define LEN sizeof( ...
- 【论文笔记】Malware Detection with Deep Neural Network Using Process Behavior
[论文笔记]Malware Detection with Deep Neural Network Using Process Behavior 论文基本信息 会议: IEEE(2016 IEEE 40 ...
- PAT乙级考前总结(一)
数学相关的题目 1001 害死人不偿命的(3n+1)猜想 (15 分) 直接一步步计数 1005 继续(3n+1)猜想 (25 分) 卡拉兹(Callatz)猜想已经在1001中给出了描述.在这个题目 ...
- 小妖精的完美游戏教室——buff系统
作者:小妖精Balous,未经作者允许,任何个人与单位不得将此源代码用于商业化项目 #region buff /// <summary> /// 是否魔法免疫,魔法免疫的生物不会受到除自己 ...