diff --git a/pkg_auto/impl/inside_sdk_container_lib.sh b/pkg_auto/impl/inside_sdk_container_lib.sh
index ec7480dd74e..3f1bdd20079 100644
--- a/pkg_auto/impl/inside_sdk_container_lib.sh
+++ b/pkg_auto/impl/inside_sdk_container_lib.sh
@@ -434,20 +434,6 @@ function clean_empty_warning_files() {
     done
 }
 
-function get_num_proc() {
-    local -n num_proc_ref=${1}; shift
-    local -i num_proc=1 rv=1
-
-    # stolen from portage
-    [[ rv -eq 0 ]] || { rv=0; num_proc=$(getconf _NPROCESSORS_ONLN 2>/dev/null) || rv=1; }
-    [[ rv -eq 0 ]] || { rv=0; num_proc=$(sysctl -n hw.ncpu 2>/dev/null) || rv=1; }
-    # stolen from common.sh
-    [[ rv -eq 0 ]] || { rv=0; num_proc=$(grep -c "^processor" /proc/cpuinfo 2>/dev/null) || rv=1; }
-    [[ rv -eq 0 ]] || { rv=0; num_proc=1; }
-
-    num_proc_ref=${num_proc}
-}
-
 function generate_cache_for() {
     local repo=${1}; shift
 
diff --git a/pkg_auto/impl/jobs_lib.sh b/pkg_auto/impl/jobs_lib.sh
new file mode 100644
index 00000000000..387557d0be9
--- /dev/null
+++ b/pkg_auto/impl/jobs_lib.sh
@@ -0,0 +1,248 @@
+#!/bin/bash
+
+# This file implements simple jobs functionality - running tasks in
+# separate processes. Communication with the job goes through its
+# standard input/output/error.
+#
+# An example use of the library is as follows:
+#
+#   # Echoes everything received on standard input to standard
+#   # output. Finishes when it receives "shutdown".
+#   function echoer_job() {
+#       local REPLY
+#       while read -r; do
+#           echo "${REPLY}"
+#           if [[ ${REPLY} = 'shutdown' ]]; then
+#               return 0
+#           fi
+#       done
+#   }
+#
+#   job_declare echoer
+#   job_run -m echoer echoer_job
+#   job_send_input echoer one two three shutdown
+#   declare -a echoer_lines=()
+#   declare echoer_done=''
+#   while [[ -z ${echoer_done} ]]; do
+#       if ! job_is_alive echoer; then
+#           echoer_done=x
+#       fi
+#       job_get_output echoer echoer_lines
+#       if [[ ${#echoer_lines[@]} -gt 0 ]]; then
+#           printf '%s\n' "${echoer_lines[@]/#/from echoer job: }"
+#       fi
+#       sleep 0.1
+#   done
+#   declare -i echoer_exit_status
+#   job_reap echoer echoer_exit_status
+#   job_unset echoer
+#   echo "echoer done with exit status ${echoer_exit_status}"
+
+
+if [[ -z ${__JOBS_LIB_SH_INCLUDED__:-} ]]; then
+__JOBS_LIB_SH_INCLUDED__=x
+
+source "$(dirname "${BASH_SOURCE[0]}")/util.sh"
+
+# Fields of the job struct.
+#
+# JOB_INFD_IDX - descriptor for standard input
+#
+# JOB_OUTFD_IDX - descriptor for standard output
+#
+# JOB_ERRFD_IDX - descriptor for standard error
+#
+# JOB_PID_IDX - job PID
+declare -gri JOB_INFD_IDX=0 JOB_OUTFD_IDX=1 JOB_ERRFD_IDX=2 JOB_PID_IDX=3
+
+# Declare job variables.
+#
+# Parameters:
+#
+# @ - names of variables to be used for jobs
+function job_declare() {
+    struct_declare -ga "${@}" "('-1' '-1' '-1' '-1')"
+}
+
+# Unset job variables.
+#
+# Parameters:
+#
+# @ - names of job variables
+function job_unset() {
+    unset "${@}"
+}
+
+# Kick off the job. The passed function will be run in a separate
+# process, and the following parameters will be passed to the
+# function. For this to work, the job must be just-declared or reaped.
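+#
+# (A rough sketch of the intended lifecycle, using the functions
+# defined in this file; the job name is illustrative:
+#
+#   job_declare j
+#   job_run j some_func arg1 arg2   # spawn the process
+#   job_send_input j ...            # talk to it
+#   job_get_output j lines_var      # poll its output
+#   job_reap j status_var           # once job_is_alive says it died
+#   job_run j other_func            # a reaped job can be reused
+#   job_unset j                     # or released for good)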
+# +# Flags: +# +# -m - merge standard output and standard error into one descriptor +# +# Parameters: +# +# 1 - job +# @ - function to run, followed by its parameters +function job_run() { + local merge_out_with_err= + if [[ ${1:-} = '-m' ]]; then + shift + merge_out_with_err=x + fi + local -n job_ref=${1}; shift + # rest are function and args to run + + local -i pid=${job_ref[JOB_PID_IDX]} infd=${job_ref[JOB_INFD_IDX]} outfd=${job_ref[JOB_OUTFD_IDX]} errfd=${job_ref[JOB_ERRFD_IDX]} + if [[ pid -ne -1 || infd -ne -1 || outfd -ne -1 || errfd -ne -1 ]]; then + fail "trying to run a job (${*@Q}), while one is already running (pid: ${pid})" + fi + + local fd_var_name path + local -i new_fd + local -a var_names=( infd outfd ) + if [[ -z ${merge_out_with_err} ]]; then + var_names+=( errfd ) + fi + for fd_var_name in "${var_names[@]}"; do + local -n fifo_fd_ref=${fd_var_name} + path=$(mktemp -t -u pipe-XXXXXXXXXXXXXXXXX) + + mkfifo "${path}" + exec {new_fd}<>"${path}" + rm -f "${path}" + fifo_fd_ref=${new_fd} + unset -n fifo_fd_ref + done + if [[ -n ${merge_out_with_err} ]]; then + errfd=${outfd} + fi + __jl_job_runner__ "${@}" <&${infd} >&${outfd} 2>&${errfd} & + pid=${!} + job_ref[JOB_PID_IDX]=${pid} + job_ref[JOB_INFD_IDX]=${infd} + job_ref[JOB_OUTFD_IDX]=${outfd} + job_ref[JOB_ERRFD_IDX]=${errfd} +} + +# Reap the dead job and get its exit status. After this call, the job +# can be reused for another run or unset. +# +# Parameters: +# +# 1 - job +# 2 - name of a scalar variable, where exit status will be stored +function job_reap() { + local -n job_ref=${1}; shift + local -n exit_status_ref=${1}; shift + + local -i pid=${job_ref[JOB_PID_IDX]} infd=${job_ref[JOB_INFD_IDX]} outfd=${job_ref[JOB_OUTFD_IDX]} errfd=${job_ref[JOB_ERRFD_IDX]} + if [[ pid -eq -1 || infd -eq -1 || outfd -eq -1 || errfd -eq -1 ]]; then + fail "trying to reap nonexistent job" + fi + + local -i es=0 + wait "${pid}" || es=${?} + + local fd_var_name + local -i old_fd + local -a var_names=(infd outfd) + if [[ outfd -ne errfd ]]; then + var_names+=( errfd ) + fi + for fd_var_name in "${var_names[@]}"; do + local -n fifo_fd_ref=${fd_var_name} + old_fd=${fifo_fd_ref} + exec {old_fd}>&- + unset -n fifo_fd_ref + done + job_ref[JOB_PID_IDX]=-1 + job_ref[JOB_INFD_IDX]=-1 + job_ref[JOB_OUTFD_IDX]=-1 + job_ref[JOB_ERRFD_IDX]=-1 + exit_status_ref=${es} +} + +# Check if job is still alive. Returns true if so, otherwise false. +# +# Parameters: +# +# 1 - job +function job_is_alive() { + local -n job_ref=${1}; shift + + local -i pid=${job_ref[JOB_PID_IDX]} + if [[ pid -eq -1 ]]; then + fail "checking status of not-yet-running or already-reaped job" + fi + # dc - don't care + local status dc + local -i ppid + { + read -r dc dc status ppid dc <"/proc/${pid}/stat" || return 1 + } >/dev/null 2>&1 + if [[ ppid -ne ${BASHPID} && ppid -ne ${$} ]]; then + # parent pid mismatch, this means that our child died and the + # pid got reused + return 1 + fi + case ${status} in + x|X|Z) + # I don't think I have ever reached this line. + return 1 + ;; + esac + return 0 +} + +# Send data to the job. Every data parameter will be a separate line. +# +# Parameters: +# +# 1 - job +# @ - data to send +function job_send_input() { + local -n job_ref=${1}; shift + # rest are lines to send + local -i infd=${job_ref[JOB_INFD_IDX]} + + printf '%s\n' "${@}" >&${infd} +} + +# Get output from the job. Tries to read a line from standard output, +# then a line from standard error. If any of the reads returned data, +# it will repeat the operations. 
If -m was passed to job_run, then it +# will only read from standard output. +# +# Parameters: +# +# 1 - job +# 2 - name of an array variable where the read lines will be stored +function job_get_output() { + local -n job_ref=${1}; shift + local -n output_lines_ref=${1}; shift + + local -i outfd=${job_ref[JOB_OUTFD_IDX]} errfd=${job_ref[JOB_ERRFD_IDX]} + local REPLY got_output=x + + output_lines_ref=() + while [[ -n ${got_output} ]]; do + got_output= + if read -t 0 -u ${outfd}; then + read -r -u ${outfd} + output_lines_ref+=( "${REPLY}" ) + got_output=x + fi + if [[ errfd -ne outfd ]] && read -t 0 -u ${errfd}; then + read -r -u ${errfd} + output_lines_ref+=( "${REPLY}" ) + got_output=x + fi + done +} + +function __jl_job_runner__() { + set -euo pipefail + "${@}" +} + +fi diff --git a/pkg_auto/impl/pkg_auto_lib.sh b/pkg_auto/impl/pkg_auto_lib.sh index 83640b4b5cd..4958e445ba2 100644 --- a/pkg_auto/impl/pkg_auto_lib.sh +++ b/pkg_auto/impl/pkg_auto_lib.sh @@ -53,6 +53,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/util.sh" source "${PKG_AUTO_IMPL_DIR}/cleanups.sh" source "${PKG_AUTO_IMPL_DIR}/debug.sh" source "${PKG_AUTO_IMPL_DIR}/gentoo_ver.sh" +source "${PKG_AUTO_IMPL_DIR}/jobs_lib.sh" source "${PKG_AUTO_IMPL_DIR}/md5_cache_diff_lib.sh" # Sets up the workdir using the passed config. The config can be @@ -846,6 +847,20 @@ function lines_to_file() { printf '%s\n' "${@:2}" >>"${1}" } +# Adds lines to "manual work needed" file in given directory. +# +# Params: +# +# 1 - directory where the file is +# @ - lines to add +function manual_d() { + local dir=${1}; shift + # rest are lines to print + + pkg_debug_lines 'manual work needed:' "${@}" + lines_to_file "${dir}/manual-work-needed" "${@}" +} + # Adds lines to "manual work needed" file in reports. # # Params: @@ -855,8 +870,21 @@ function manual() { # shellcheck source=for-shellcheck/globals source "${WORKDIR}/globals" - pkg_debug_lines 'manual work needed:' "${@}" - lines_to_file "${REPORTS_DIR}/manual-work-needed" "${@}" + manual_d "${REPORTS_DIR}" "${@}" +} + +# Adds lines to "warnings" file in given directory. Should be used to +# report some issues with the processed packages. +# +# Params: +# +# 1 - directory where the file is +# @ - lines to add +function pkg_warn_d() { + local dir=${1}; shift + + pkg_debug_lines 'pkg warn:' "${@}" + lines_to_file "${dir}/warnings" "${@}" } # Adds lines to "warnings" file in reports. Should be used to report @@ -869,8 +897,21 @@ function pkg_warn() { # shellcheck source=for-shellcheck/globals source "${WORKDIR}/globals" - pkg_debug_lines 'pkg warn:' "${@}" - lines_to_file "${REPORTS_DIR}/warnings" "${@}" + pkg_warn_d "${REPORTS_DIR}" "${@}" +} + +# Adds lines to "developer warnings" file in given directory. Should +# be used to report some failed assumption in the automation, or bugs. +# +# Params: +# +# 1 - directory where the file is +# @ - lines to add +function devel_warn_d() { + local dir=${1}; shift + + pkg_debug_lines 'developer warn:' "${@}" + lines_to_file "${dir}/developer-warnings" "${@}" } # Adds lines to "developer warnings" file in reports. 
Should be used
@@ -883,8 +924,7 @@ function devel_warn() {
     # shellcheck source=for-shellcheck/globals
     source "${WORKDIR}/globals"
 
-    pkg_debug_lines 'developer warn:' "${@}"
-    lines_to_file "${REPORTS_DIR}/developer-warnings" "${@}"
+    devel_warn_d "${REPORTS_DIR}" "${@}"
 }
 
 # Handles package names that were missing from Gentoo by either
@@ -1051,6 +1091,51 @@ function set_mvm_to_array_mvm_cb() {
     mvm_add "${pkg_to_tags_mvm_var_name}" "${pkg}" "${prod_item[@]}" "${sorted_items[@]}"
 }
 
+# Fields of the sdk job state struct.
+#
+# SJS_COMMAND_IDX - name of an array variable containing the command
+#                   to run
+#
+# SJS_STATE_DIR_IDX - run's state directory
+#
+# SJS_KIND_IDX - run kind (either old or new, meaning reports for
+#                packages before updates or after updates)
+#
+# SJS_JOB_NAME_IDX - job variable name
+declare -gri SJS_COMMAND_IDX=0 SJS_STATE_DIR_IDX=1 SJS_KIND_IDX=2 SJS_JOB_NAME_IDX=3
+
+# Declare SDK job state variables.
+#
+# Parameters:
+#
+# @ - names of variables to be used for states.
+function sdk_job_state_declare() {
+    struct_declare -ga "${@}" "( 'EMPTY_ARRAY' '' '' '' )"
+}
+
+# Unset SDK job state variables.
+#
+# Parameters:
+#
+# @ - names of state variables
+function sdk_job_state_unset() {
+    local name
+    for name; do
+        local -n sdk_job_state_ref=${name}
+        local array_name=${sdk_job_state_ref[SJS_COMMAND_IDX]}
+        if [[ ${array_name} != 'EMPTY_ARRAY' ]]; then
+            unset "${array_name}"
+        fi
+        unset array_name
+        local job_name=${sdk_job_state_ref[SJS_JOB_NAME_IDX]}
+        if [[ -n ${job_name} ]]; then
+            job_unset "${job_name}"
+        fi
+        unset job_name
+        unset -n sdk_job_state_ref
+    done
+    unset "${@}"
+}
+
 # Generate package reports inside SDKs for all arches and states. In
 # case of failure, whatever reports where generated so far will be
 # stored in salvaged-reports subdirectory of the reports directory.
@@ -1071,6 +1156,13 @@ function generate_sdk_reports() {
     local sdk_reports_dir top_dir dir entry full_path
     local -a dir_queue all_dirs all_files
 
+    local job_args_var_name sdk_job_state_name
+    local -a sdk_job_state_names=()
+
+    # First create and set up SDK job states for the "before updates"
+    # (referred to as old) and "after updates" (referred to as new)
+    # jobs. This means creating separate worktrees and state
+    # directories, and preparing the commands to be run as jobs.
for sdk_run_kind in "${WHICH[@]}"; do state_var_name="${sdk_run_kind^^}_STATE" sdk_run_state="${!state_var_name}_sdk_run" @@ -1088,22 +1180,115 @@ function generate_sdk_reports() { pkg_auto_copy=$(mktemp --tmpdir="${WORKDIR}" --directory "pkg-auto-copy.XXXXXXXX") add_cleanup "rm -rf ${pkg_auto_copy@Q}" cp -a "${PKG_AUTO_DIR}"/* "${pkg_auto_copy}" - local -a run_sdk_container_args=( - -C "${SDK_IMAGE}" - -n "pkg-${sdk_run_kind}" - -U - -m "${pkg_auto_copy}:/mnt/host/source/src/scripts/pkg_auto" - --rm - ./pkg_auto/inside_sdk_container.sh pkg-reports "${ARCHES[@]}" + gen_varname job_args_var_name + declare -ga "${job_args_var_name}=()" + local -n job_args_ref=${job_args_var_name} + job_args_ref=( + env + --chdir "${sdk_run_state}" + ./run_sdk_container + -C "${SDK_IMAGE}" + -n "pkg-${sdk_run_kind}" + -U + -m "${pkg_auto_copy}:/mnt/host/source/src/scripts/pkg_auto" + --rm + ./pkg_auto/inside_sdk_container.sh + pkg-reports + "${ARCHES[@]}" ) - rv=0 - env --chdir "${sdk_run_state}" ./run_sdk_container "${run_sdk_container_args[@]}" || rv=${?} - unset run_sdk_container_args + unset -n job_args_ref + + gen_varname sdk_job_state_name + sdk_job_state_declare "${sdk_job_state_name}" + local -n sdk_job_state_ref="${sdk_job_state_name}" + sdk_job_state_ref[SJS_COMMAND_IDX]=${job_args_var_name} + sdk_job_state_ref[SJS_STATE_DIR_IDX]=${sdk_run_state} + sdk_job_state_ref[SJS_KIND_IDX]=${sdk_run_kind} + unset -n sdk_job_state_ref + + sdk_job_state_names+=( "${sdk_job_state_name}" ) + done + + # Create the jobs and kick them off. + local sdk_job_name + for sdk_job_state_name in "${sdk_job_state_names[@]}"; do + local -n sdk_job_state_ref=${sdk_job_state_name} + job_args_var_name=${sdk_job_state_ref[SJS_COMMAND_IDX]} + + gen_varname sdk_job_name + job_declare "${sdk_job_name}" + all_sdk_jobs+=( "${sdk_job_name}" ) + + local -n job_args_ref=${job_args_var_name} + job_run -m "${sdk_job_name}" "${job_args_ref[@]}" + unset -n job_args_ref + + sdk_job_state_ref[SJS_JOB_NAME_IDX]=${sdk_job_name} + unset -n sdk_job_state_ref + done + + # Loop over the current running job states array to check if jobs + # in the array are still alive. The alive jobs will be added to + # the next running job states array that will be looped over (thus + # becoming the "current" array, whereas old "current" array + # becomes "next"). In the meantime gather the output from the jobs + # and print it to the terminal. 
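+    #
+    # (A minimal sketch of the double-buffer pattern used below, with
+    # illustrative variable names; the real loop additionally drains
+    # and prints job output:
+    #
+    #   cur=0; next=1; count=${#live_0[@]}
+    #   while [[ count -gt 0 ]]; do
+    #       local -n cur_ref=live_${cur} next_ref=live_${next}
+    #       next_ref=()
+    #       for j in "${cur_ref[@]}"; do
+    #           if job_is_alive "${j}"; then next_ref+=( "${j}" ); fi
+    #       done
+    #       count=${#next_ref[@]}
+    #       cur=$((1 - cur)); next=$((1 - next))
+    #       unset -n cur_ref next_ref
+    #   done)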
+    local -i current_idx=0 next_idx=1 idx state_count=${#sdk_job_state_names[@]}
+    local -a sdk_job_state_names_0=( "${sdk_job_state_names[@]}" ) sdk_job_state_names_1=() sdk_job_output_lines=()
+    while [[ state_count -gt 0 ]]; do
+        local -n sdk_jobs_state_names_ref=sdk_job_state_names_${current_idx}
+        local -n next_sdk_jobs_state_names_ref=sdk_job_state_names_${next_idx}
+        next_sdk_jobs_state_names_ref=()
+        for sdk_job_state_name in "${sdk_jobs_state_names_ref[@]}"; do
+            local -n sdk_job_state_ref=${sdk_job_state_name}
+            sdk_job_name=${sdk_job_state_ref[SJS_JOB_NAME_IDX]}
+            sdk_run_kind=${sdk_job_state_ref[SJS_KIND_IDX]}
+            unset -n sdk_job_state_ref
+            if job_is_alive "${sdk_job_name}"; then
+                next_sdk_jobs_state_names_ref+=( "${sdk_job_state_name}" )
+            fi
+            job_get_output "${sdk_job_name}" sdk_job_output_lines
+            if [[ ${#sdk_job_output_lines[@]} -gt 0 ]]; then
+                info_lines "${sdk_job_output_lines[@]/#/${sdk_run_kind}: }"
+            fi
+        done
+        state_count=${#next_sdk_jobs_state_names_ref[@]}
+        if [[ state_count -gt 0 ]]; then
+            sleep 0.2
+        fi
+        idx=${current_idx}
+        current_idx=${next_idx}
+        next_idx=${idx}
+        unset -n sdk_jobs_state_names_ref next_sdk_jobs_state_names_ref
+    done
+
+    # All jobs are done now, so reap them and check if they
+    # succeeded. If they failed, print the contents of the warning
+    # files and save the reports they have generated so far in a
+    # separate place.
+    local sr_dir_created='' gsr_sdk_run_state_basename
+    for sdk_job_state_name in "${sdk_job_state_names[@]}"; do
+        local -n sdk_job_state_ref=${sdk_job_state_name}
+        sdk_job_name=${sdk_job_state_ref[SJS_JOB_NAME_IDX]}
+        sdk_run_state=${sdk_job_state_ref[SJS_STATE_DIR_IDX]}
+        sdk_run_kind=${sdk_job_state_ref[SJS_KIND_IDX]}
+        unset -n sdk_job_state_ref
+        job_reap "${sdk_job_name}" rv
+        if [[ ${rv} -ne 0 ]]; then
+            # Report generation failed, save the generated reports in
+            # REPORTS_DIR for further examination by the developer.
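+            #
+            # (The result lands under a per-state subdirectory, e.g.
+            # ${REPORTS_DIR}/salvaged-reports/<state-dir-basename>/;
+            # the exact basename depends on the state directory and
+            # is illustrative here.)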
+ local salvaged_dir salvaged_dir_sdk salvaged_dir="${REPORTS_DIR}/salvaged-reports" + basename_out "${sdk_run_state}" gsr_sdk_run_state_basename + salvaged_dir_sdk="${salvaged_dir}/${gsr_sdk_run_state_basename}" + if [[ -z ${sr_dir_created} ]]; then + rm -rf "${salvaged_dir}" + mkdir -p "${salvaged_dir}" + sr_dir_created=x + fi { - info "run_sdk_container finished with exit status ${rv}, printing the warnings below for a clue" + info "run_sdk_container for ${sdk_run_kind@Q} finished with exit status ${rv}, printing the warnings below for a clue" info for file in "${sdk_run_state}/pkg-reports/"*'-warnings'; do info "from ${file}:" @@ -1113,43 +1298,50 @@ function generate_sdk_reports() { done info info 'whatever reports generated by the failed run are saved in' - info "${salvaged_dir@Q} directory" + info "${salvaged_dir_sdk@Q} directory" info } >&2 - rm -rf "${salvaged_dir}" - cp -a "${sdk_run_state}/pkg-reports" "${salvaged_dir}" - unset salvaged_dir - fail "copying done, stopping now" - fi - sdk_reports_dir="${WORKDIR}/pkg-reports/${sdk_run_kind}" - top_dir="${sdk_run_state}/pkg-reports" - dir_queue=( "${top_dir}" ) - all_dirs=() - all_files=() - while [[ ${#dir_queue[@]} -gt 0 ]]; do - dir=${dir_queue[0]} - dir_queue=( "${dir_queue[@]:1}" ) - entry=${dir#"${top_dir}"} - if [[ -z ${entry} ]]; then - all_dirs=( "${sdk_reports_dir}" "${all_dirs[@]}" ) - else - entry=${entry#/} - all_dirs=( "${sdk_reports_dir}/${entry}" "${all_dirs[@]}" ) - fi - for full_path in "${dir}/"*; do - if [[ -d ${full_path} ]]; then - dir_queue+=( "${full_path}" ) + cp -a "${sdk_run_state}/pkg-reports" "${salvaged_dir_sdk}" + unset salvaged_dir salvaged_dir_sdk + else + # We succeeded, so move the reports from the state dir to + # workdir and generate cleanups for the moved reports. + sdk_reports_dir="${WORKDIR}/pkg-reports/${sdk_run_kind}" + top_dir="${sdk_run_state}/pkg-reports" + dir_queue=( "${top_dir}" ) + all_dirs=() + all_files=() + while [[ ${#dir_queue[@]} -gt 0 ]]; do + dir=${dir_queue[0]} + dir_queue=( "${dir_queue[@]:1}" ) + entry=${dir#"${top_dir}"} + if [[ -z ${entry} ]]; then + all_dirs=( "${sdk_reports_dir}" "${all_dirs[@]}" ) else - entry=${full_path##"${top_dir}/"} - all_files+=( "${sdk_reports_dir}/${entry}" ) + entry=${entry#/} + all_dirs=( "${sdk_reports_dir}/${entry}" "${all_dirs[@]}" ) fi + for full_path in "${dir}/"*; do + if [[ -d ${full_path} ]]; then + dir_queue+=( "${full_path}" ) + else + entry=${full_path##"${top_dir}/"} + all_files+=( "${sdk_reports_dir}/${entry}" ) + fi + done done - done - add_cleanup \ - "rm -f ${all_files[*]@Q}" \ - "rmdir ${all_dirs[*]@Q}" - mv "${sdk_run_state}/pkg-reports" "${sdk_reports_dir}" + add_cleanup \ + "rm -f ${all_files[*]@Q}" \ + "rmdir ${all_dirs[*]@Q}" + mv "${sdk_run_state}/pkg-reports" "${sdk_reports_dir}" + fi done + sdk_job_state_unset "${sdk_job_state_names[@]}" + # salvaged reports directory was created, means that report + # generation failed + if [[ -n ${sr_dir_created} ]]; then + fail "copying done, stopping now" + fi cp -a "${WORKDIR}/pkg-reports" "${REPORTS_DIR}/reports-from-sdk" } @@ -1717,10 +1909,467 @@ function read_package_sources() { done } -# This monstrosity takes renames map and package tags information, -# reads the reports, does consistency checks and uses the information -# from previous steps to write out package differences between the old -# and new state into the reports directory. +# Fields of the bunch of maps struct. 
+#
+# BOM_PKG_TO_TAGS_MVM_IDX - mapping of a package name to an array of
+#                           tags (mostly describing where the package
+#                           is used, like SDK or azure OEM or python
+#                           sysext)
+#
+# BOM_PKG_SLOTS_SET_MVM_IDX - mapping of a package name to a set of
+#                             slots available for the package (slots
+#                             before the update and after the update
+#                             are mixed here)
+#
+# BOM_OLD_PKG_SLOT_VERMINMAX_MAP_MVM_IDX - mapping of a package name
+#                                          to used slots, each slot is
+#                                          associated with a pair of
+#                                          versions - lowest and
+#                                          greatest used version (for
+#                                          consistent packages, these
+#                                          versions are the same); the
+#                                          mapping is for packages
+#                                          before the update
+#
+# BOM_NEW_PKG_SLOT_VERMINMAX_MAP_MVM_IDX - same as above, but for
+#                                          packages after the update
+#
+# BOM_PKG_SOURCES_MAP_IDX - mapping of package name to the repository
+#                           name
+declare -gri BOM_PKG_TO_TAGS_MVM_IDX=0 BOM_PKG_SLOTS_SET_MVM_IDX=1 BOM_OLD_PKG_SLOT_VERMINMAX_MAP_MVM_IDX=2 BOM_NEW_PKG_SLOT_VERMINMAX_MAP_MVM_IDX=3 BOM_PKG_SOURCES_MAP_IDX=4
+
+# Declare bunch of maps variables.
+#
+# Parameters:
+#
+# @ - names of variables to be used for bunch of maps
+function bunch_of_maps_declare() {
+    struct_declare -ga "${@}" "( '' '' '' '' '' )"
+}
+
+# Unset bunch of maps variables.
+#
+# Parameters:
+#
+# @ - names of bunch of maps variables
+function bunch_of_maps_unset() {
+    unset "${@}"
+}
+
+# Fields of the package output paths struct.
+#
+# POP_OUT_DIR_IDX - toplevel output directory.
+#
+# POP_PKG_OUT_DIR_IDX - package-specific output directory under the
+#                       toplevel output directory.
+#
+# POP_PKG_SLOT_OUT_DIR_IDX - slot-specific output directory under the
+#                            package-specific output directory.
declare -gri POP_OUT_DIR_IDX=0 POP_PKG_OUT_DIR_IDX=1 POP_PKG_SLOT_OUT_DIR_IDX=2
+
+# Declare package output paths variables.
+#
+# Parameters:
+#
+# @ - names of variables to be used for package output paths
+function package_output_paths_declare() {
+    struct_declare -ga "${@}" "( '' '' '' )"
+}
+
+# Unset package output paths variables.
+#
+# Parameters:
+#
+# @ - names of package output paths variables
+function package_output_paths_unset() {
+    unset "${@}"
+}
+
+# Fields of package job state struct.
+#
+# PJS_JOB_IDX - name of a job variable
+# PJS_DIR_IDX - path to job's state directory
+declare -gri PJS_JOB_IDX=0 PJS_DIR_IDX=1
+
+# Declare package job state variables.
+#
+# Parameters:
+#
+# @ - names of variables to be used for package job states
+function pkg_job_state_declare() {
+    struct_declare -ga "${@}" "( '' '' )"
+}
+
+# Unset package job state variables.
+#
+# Parameters:
+#
+# @ - names of package job state variables
+function pkg_job_state_unset() {
+    local name job_name
+    for name; do
+        local -n pkg_job_state_ref=${name}
+        job_name=${pkg_job_state_ref[PJS_JOB_IDX]}
+        if [[ -n ${job_name} ]]; then
+            job_unset "${job_name}"
+        fi
+        unset -n pkg_job_state_ref
+    done
+    unset "${@}"
+}
+
+# Messages used in communication between package jobs and the main
+# process.
+#
+# READYFORMORE is a message that a package job sends to the main
+# process when it is done with processing the current batch of
+# packages and asks for more.
+#
+# WEAREDONE is a message that the main process sends to a package job
+# when there are no more packages to process, so the job should
+# terminate.
+declare -gr ready_for_more_msg='READYFORMORE' we_are_done_msg='WEAREDONE'
+
+# A job function for handling package updates. Receives a batch of
+# packages to process, processes them, writes results to a given
+# directory and asks for more packages when done.
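+#
+# A sample exchange might look like this (the package names are made
+# up for illustration):
+#
+#   job  -> main: READYFORMORE
+#   main -> job:  2
+#   main -> job:  app-arch/zip app-arch/zip
+#   main -> job:  sys-devel/gettext sys-devel/gettext
+#   job  -> main: READYFORMORE
+#   main -> job:  WEAREDONE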
+#
+# Parameters:
+#
+# 1 - output directory for the package handling results
+# 2 - name of a bunch of maps variable
+function handle_package_changes_job() {
+    local output_dir=${1}; shift
+    local bunch_of_maps_var_name=${1}; shift
+    local we_are_done='' line
+    local -a reply_lines pair
+    local -i i pkg_count
+
+    local REPLY
+    while [[ -z ${we_are_done} ]]; do
+        echo "${ready_for_more_msg}"
+        read -r
+        if [[ ${REPLY} = "${we_are_done_msg}" ]]; then
+            we_are_done=x
+        elif [[ ${REPLY} =~ ^[0-9]+$ ]]; then
+            reply_lines=()
+            pkg_count=${REPLY}
+            for ((i = 0; i < pkg_count; ++i)); do
+                read -r
+                reply_lines+=( "${REPLY}" )
+            done
+            for line in "${reply_lines[@]}"; do
+                mapfile -t pair <<<"${line// /$'\n'}"
+                if [[ ${#pair[@]} -eq 2 ]]; then
+                    handle_one_package_change "${output_dir}" "${bunch_of_maps_var_name}" "${pair[@]}"
+                else
+                    echo "invalid message received: ${line@Q}, expected a pair of package names"
+                fi
+            done
+        else
+            echo "invalid message received: ${REPLY@Q}, expected a number or ${we_are_done_msg@Q}"
+        fi
+    done
+    return 0
+}
+
+# Generate a report about one, possibly renamed, package. This
+# includes generating diffs between old and new ebuilds for each used
+# slot of the package, diffs of other files the package has, and some
+# automatic reports based on md5-cache entries, along with summary and
+# changelog stubs.
+#
+# Parameters:
+#
+# 1 - the main output directory, the generated output will end up in
+#     some subdirectories
+# 2 - bunch of maps (generally, package information)
+# 3 - old package name
+# 4 - new package name
+function handle_one_package_change() {
+    local output_dir=${1}; shift
+    local -n bunch_of_maps_ref=${1}; shift
+    local old_name=${1}; shift
+    local new_name=${1}; shift
+
+    local warnings_dir="${output_dir}/warnings"
+    local updates_dir="${output_dir}/updates"
+
+    local pkg_to_tags_mvm_var_name=${bunch_of_maps_ref[BOM_PKG_TO_TAGS_MVM_IDX]}
+    local pkg_slots_set_mvm_var_name=${bunch_of_maps_ref[BOM_PKG_SLOTS_SET_MVM_IDX]}
+    local old_pkg_slot_verminmax_map_mvm_var_name=${bunch_of_maps_ref[BOM_OLD_PKG_SLOT_VERMINMAX_MAP_MVM_IDX]}
+    local new_pkg_slot_verminmax_map_mvm_var_name=${bunch_of_maps_ref[BOM_NEW_PKG_SLOT_VERMINMAX_MAP_MVM_IDX]}
+    local -n pkg_sources_map_ref=${bunch_of_maps_ref[BOM_PKG_SOURCES_MAP_IDX]}
+
+    # The function goes over a pair of old and new package names. For
+    # each name there will be some checks done (like does this package
+    # even exist). Each name in the pair has a set of used slots
+    # associated with it (the most common situation is that each has
+    # just one slot, but there are some packages for which we have
+    # multiple slots installed, like app-text/docbook-xml-dtd). Some
+    # of the slots will appear for both the old and the new package
+    # name, sometimes there will be slots available only in the old
+    # state or only in the new state. Each slot for each package name
+    # has an associated min version and max version. So for common
+    # slots we usually compare the min version of the old package with
+    # the max version of the new package. Any inconsistencies with the
+    # versions should have been reported by now. There are some edge
+    # cases with the slots that are not handled by the automation - in
+    # such cases there will be a "manual action needed" report.
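+    #
+    # (For instance, a slot-to-verminmax map entry might look like
+    # "4.4" -> "4.4-r3:4.4-r3" - the "min version:max version" string
+    # is what the code below splits on ':'; the concrete values are
+    # made up for illustration.)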
+
+    if [[ ${old_name} = "${new_name}" ]]; then
+        info "handling update of ${new_name}"
+    else
+        info "handling update of ${new_name} (renamed from ${old_name})"
+    fi
+    pkg_debug_enable "${old_name}" "${new_name}"
+    pkg_debug 'handling updates'
+    local old_repo=${pkg_sources_map_ref["${old_name}"]:-}
+    local new_repo=${pkg_sources_map_ref["${new_name}"]:-}
+    if [[ -z ${old_repo} ]]; then
+        pkg_warn_d "${warnings_dir}" \
+            '- package not in old state' \
+            " - old package: ${old_name}" \
+            " - new package: ${new_name}"
+        pkg_debug_disable
+        return 0
+    fi
+    if [[ -z ${new_repo} ]]; then
+        pkg_warn_d "${warnings_dir}" \
+            '- package not in new state' \
+            " - old package: ${old_name}" \
+            " - new package: ${new_name}"
+        pkg_debug_disable
+        return 0
+    fi
+    if [[ ${old_repo} != "${new_repo}" ]]; then
+        # This is pretty much an arbitrary limitation and I don't
+        # remember any more why we have it.
+        pkg_warn_d "${warnings_dir}" \
+            '- package has moved between repos? unsupported for now' \
+            " - old package and repo: ${old_name} ${old_repo}" \
+            " - new package and repo: ${new_name} ${new_repo}"
+        pkg_debug_disable
+        return 0
+    fi
+    if [[ ${new_repo} != 'portage-stable' ]]; then
+        # coreos-overlay packages will need a separate handling
+        pkg_debug 'not a portage-stable package'
+        pkg_debug_disable
+        return 0
+    fi
+
+    local hopc_old_slots_set_var_name hopc_new_slots_set_var_name
+    mvm_get "${pkg_slots_set_mvm_var_name}" "${old_name}" hopc_old_slots_set_var_name
+    mvm_get "${pkg_slots_set_mvm_var_name}" "${new_name}" hopc_new_slots_set_var_name
+    : "${hopc_old_slots_set_var_name:=EMPTY_MAP}"
+    : "${hopc_new_slots_set_var_name:=EMPTY_MAP}"
+
+    local hopc_old_slot_verminmax_map_var_name hopc_new_slot_verminmax_map_var_name
+    mvm_get "${old_pkg_slot_verminmax_map_mvm_var_name}" "${old_name}" hopc_old_slot_verminmax_map_var_name
+    mvm_get "${new_pkg_slot_verminmax_map_mvm_var_name}" "${new_name}" hopc_new_slot_verminmax_map_var_name
+    : "${hopc_old_slot_verminmax_map_var_name:=EMPTY_MAP}"
+    : "${hopc_new_slot_verminmax_map_var_name:=EMPTY_MAP}"
+    local -n old_slot_verminmax_map_ref=${hopc_old_slot_verminmax_map_var_name}
+    local -n new_slot_verminmax_map_ref=${hopc_new_slot_verminmax_map_var_name}
+
+    # Filter out slots for the old and new package names that come
+    # without versions. This may happen because we collect all slot
+    # names for a package name without differentiating whether such a
+    # slot existed in the old state or still exists in the new state.
+    # If a slot didn't exist in a given state, it will come without
+    # version information for that state and will be dropped. An
+    # example would be an update of sys-devel/binutils from 2.42 to
+    # 2.43. Each binutils version has a separate slot named after the
+    # version, so the slots set would be (2.42 2.43). Slot "2.42" does
+    # not exist in the new state any more, and "2.43" does not yet
+    # exist in the old state, so each gets dropped for the state it is
+    # missing from. Thus the filtered slots set for the old state will
+    # only contain 2.42, while for the new state - only 2.43.
+ local which slots_set_var_name_var_name slot_verminmax_map_var_name_var_name filtered_slots_set_var_name s verminmax + local -A hopc_old_filtered_slots_set hopc_new_filtered_slots_set + for which in "${WHICH[@]}"; do + slots_set_var_name_var_name="hopc_${which}_slots_set_var_name" + slot_verminmax_map_var_name_var_name="hopc_${which}_slot_verminmax_map_var_name" + filtered_slots_set_var_name="hopc_${which}_filtered_slots_set" + local -n which_slots_set_ref=${!slots_set_var_name_var_name} + local -n which_slot_verminmax_map_ref=${!slot_verminmax_map_var_name_var_name} + local -n which_filtered_slots_set_ref=${filtered_slots_set_var_name} + pkg_debug "all unfiltered slots for ${which} name: ${!which_slots_set_ref[*]}" + which_filtered_slots_set_ref=() + for s in "${!which_slots_set_ref[@]}"; do + verminmax=${which_slot_verminmax_map_ref["${s}"]:-} + if [[ -n ${verminmax} ]]; then + which_filtered_slots_set_ref["${s}"]=x + fi + done + pkg_debug "all filtered slots for ${which} name: ${!which_filtered_slots_set_ref[*]}" + unset -n which_filtered_slots_set_ref + unset -n which_slot_verminmax_map_ref + unset -n which_slots_set_ref + done + + local -A hopc_only_old_slots_set=() hopc_only_new_slots_set=() hopc_common_slots_set=() + sets_split \ + hopc_old_filtered_slots_set hopc_new_filtered_slots_set \ + hopc_only_old_slots_set hopc_only_new_slots_set hopc_common_slots_set + pkg_debug "all common slots: ${!hopc_common_slots_set[*]}" + pkg_debug "slots only for old name: ${!hopc_only_old_slots_set[*]}" + pkg_debug "slots only for new name: ${!hopc_only_new_slots_set[*]}" + + local update_dir_non_slot="${updates_dir}/${new_name}" + mkdir -p "${update_dir_non_slot}" + + package_output_paths_declare hopc_package_output_paths + hopc_package_output_paths[POP_OUT_DIR_IDX]=${updates_dir} + hopc_package_output_paths[POP_PKG_OUT_DIR_IDX]=${update_dir_non_slot} + # POP_PKG_SLOT_OUT_DIR_IDX will be set in loops below + + generate_non_ebuild_diffs "${update_dir_non_slot}" "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_name}" "${new_name}" + generate_full_diffs "${update_dir_non_slot}" "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_name}" "${new_name}" + generate_package_mention_reports "${update_dir_non_slot}" "${NEW_STATE}" "${old_name}" "${new_name}" + + local hopc_changed='' + local old_verminmax new_verminmax + local hopc_slot_dirname + local update_dir + local old_version new_version + local hopc_cmp_result hopc_slot_changed + pkg_debug 'going over common slots' + for s in "${!hopc_common_slots_set[@]}"; do + old_verminmax=${old_slot_verminmax_map_ref["${s}"]:-} + new_verminmax=${new_slot_verminmax_map_ref["${s}"]:-} + pkg_debug "slot: ${s}, vmm old: ${old_verminmax}, vmm new: ${new_verminmax}" + if [[ -z "${old_verminmax}" ]] || [[ -z "${new_verminmax}" ]]; then + devel_warn_d "${warnings_dir}" \ + "- no minmax info available for old and/or new:" \ + " - old package: ${old_name}" \ + " - slot: ${s}" \ + " - minmax: ${old_verminmax}" \ + " - new package: ${new_name}" \ + " - slot: ${s}" \ + " - minmax: ${new_verminmax}" + continue + fi + slot_dirname "${s}" "${s}" hopc_slot_dirname + update_dir="${update_dir_non_slot}/${hopc_slot_dirname}" + mkdir -p "${update_dir}" + hopc_package_output_paths[POP_PKG_SLOT_OUT_DIR_IDX]=${update_dir} + old_version=${old_verminmax%%:*} + new_version=${new_verminmax##*:} + gentoo_ver_cmp_out "${new_version}" "${old_version}" hopc_cmp_result + case ${hopc_cmp_result} in + "${GV_GT}") + handle_pkg_update hopc_package_output_paths 
"${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${old_version}" "${new_version}" + hopc_changed=x + ;; + "${GV_EQ}") + hopc_slot_changed= + handle_pkg_as_is hopc_package_output_paths "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${old_version}" hopc_slot_changed + if [[ -z ${hopc_slot_changed} ]]; then + rm -rf "${update_dir}" + else + hopc_changed=x + fi + ;; + "${GV_LT}") + handle_pkg_downgrade hopc_package_output_paths "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${s}" "${s}" "${old_version}" "${new_version}" + hopc_changed=x + ;; + esac + done + + # A "sys-devel/binutils update" case - one old slot and one new + # slot, but different from each other. + local hopc_old_s hopc_new_s + if [[ ${#hopc_only_old_slots_set[@]} -eq 1 ]] && [[ ${#hopc_only_new_slots_set[@]} -eq 1 ]]; then + get_first_from_set hopc_only_old_slots_set hopc_old_s + old_verminmax=${old_slot_verminmax_map_ref["${hopc_old_s}"]:-} + get_first_from_set hopc_only_new_slots_set hopc_new_s + new_verminmax=${new_slot_verminmax_map_ref["${hopc_new_s}"]:-} + pkg_debug "jumping from slot ${hopc_old_s} (vmm: ${old_verminmax}) to slot ${hopc_new_s} (vmm: ${new_verminmax})" + if [[ -z "${old_verminmax}" ]] || [[ -z "${new_verminmax}" ]]; then + devel_warn_d "${warnings_dir}" \ + "- no verminmax info available for old and/or new:" \ + " - old package: ${old_name}" \ + " - slot: ${hopc_old_s}" \ + " - minmax: ${old_verminmax}" \ + " - new package: ${new_name}" \ + " - slot: ${hopc_new_s}" \ + " - minmax: ${new_verminmax}" + else + slot_dirname "${hopc_old_s}" "${hopc_new_s}" hopc_slot_dirname + update_dir="${update_dir_non_slot}/${hopc_slot_dirname}" + mkdir -p "${update_dir}" + hopc_package_output_paths[POP_PKG_SLOT_OUT_DIR_IDX]=${update_dir} + old_version=${old_verminmax%%:*} + new_version=${new_verminmax##*:} + gentoo_ver_cmp_out "${new_version}" "${old_version}" hopc_cmp_result + case ${hopc_cmp_result} in + "${GV_GT}") + handle_pkg_update hopc_package_output_paths "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${old_version}" "${new_version}" + hopc_changed=x + ;; + "${GV_EQ}") + hopc_slot_changed= + handle_pkg_as_is hopc_package_output_paths "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${old_version}" hopc_slot_changed + if [[ -z ${hopc_slot_changed} ]]; then + rm -rf "${update_dir}" + else + hopc_changed=x + fi + ;; + "${GV_LT}") + handle_pkg_downgrade hopc_package_output_paths "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${hopc_old_s}" "${hopc_new_s}" "${old_version}" "${new_version}" + hopc_changed=x + ;; + esac + fi + elif [[ ${#hopc_only_old_slots_set[@]} -gt 0 ]] || [[ ${#hopc_only_new_slots_set[@]} -gt 0 ]]; then + pkg_debug 'complicated slots situation, needs manual intervention' + local -a lines=( + '- handle package update:' + ' - old package name:' + " - name: ${old_name}" + ' - slots:' + ) + for s in "${!hopc_old_filtered_slots_set[@]}"; do + old_verminmax=${old_slot_verminmax_map_ref["${s}"]:-} + lines+=(" - ${s}, minmax: ${old_verminmax}") + done + lines+=( + ' - new package name:' + " - name: ${new_name}" + ' - slots:' + ) + for s in "${!hopc_new_filtered_slots_set[@]}"; do + new_verminmax=${new_slot_verminmax_map_ref["${s}"]:-} + lines+=(" - ${s}, minmax: ${new_verminmax}") + done + manual_d "${warnings_dir}" "${lines[@]}" + unset lines + fi + + package_output_paths_unset hopc_package_output_paths + unset -n new_slot_verminmax_map_ref old_slot_verminmax_map_ref + # if nothing changed, drop the entire update directory for the + # 
package, and possibly the parent directory if it became + # empty (parent directory being a category directory, like + # sys-apps) + local hopc_category_dir + if [[ -z ${hopc_changed} ]]; then + pkg_debug 'no changes, dropping reports' + rm -rf "${update_dir_non_slot}" + dirname_out "${update_dir_non_slot}" hopc_category_dir + if dir_is_empty "${hopc_category_dir}"; then + rmdir "${hopc_category_dir}" + fi + fi + pkg_debug_disable +} + +# Reads the reports, does consistency checks, runs jobs to process all +# the packages, and writes out reports into the reports directory. # # Params: # @@ -1741,6 +2390,8 @@ function handle_package_changes() { mvm_declare hpc_pkg_slots_set_mvm mvm_mvc_set read_reports hpc_all_pkgs hpc_pkg_slots_set_mvm + info "doing package consistency checks" + # map[package]map[slot]string (string being "min version:max version") mvm_declare hpc_old_pkg_slot_verminmax_map_mvm mvm_mvc_map mvm_declare hpc_new_pkg_slot_verminmax_map_mvm mvm_mvc_map @@ -1749,14 +2400,14 @@ function handle_package_changes() { unset_report_mvms + info "preparing for handling package changes" + # TODO: when we handle moving packages between repos, then there # should be two maps, for old and new state local -A hpc_package_sources_map hpc_package_sources_map=() read_package_sources hpc_package_sources_map - mkdir -p "${REPORTS_DIR}/updates" - local -a old_pkgs new_pkgs old_pkgs=() new_pkgs=() @@ -1836,275 +2487,172 @@ function handle_package_changes() { done unset added_pkg_to_index_map - # The loop below goes over the pairs of old and new package - # names. For each name there will be some checks done (like does - # this package even exist). Each name in the pair has a set of - # used slots associated with it (the most common situation is that - # each have just one slot, but there are some packages that we - # have multiple slots installed, like - # app-text/docbook-xml-dtd). Some of the slots will appear in both - # old and new package name, sometimes there will be slots - # available only in the old state or only in the new state. Each - # slot for each package name has an associated min version and max - # version. So for common slots we usually compare min version for - # old package with max version for new package. Any - # inconsistencies with the versions should be reported by - # now. There are some edge cases with the slots that are not - # handled by the automation - in such cases there will be a - # "manual action needed" report. 
- - local pkg_idx=0 - local old_name new_name old_repo new_repo - local hpc_old_slots_set_var_name hpc_new_slots_set_var_name - local hpc_old_slot_verminmax_map_var_name hpc_new_slot_verminmax_map_var_name - local s hpc_old_s hpc_new_s - local old_verminmax new_verminmax - local old_version new_version - local hpc_cmp_result - local -A hpc_only_old_slots_set hpc_only_new_slots_set hpc_common_slots_set - local -a lines - local hpc_update_dir - local -A empty_map_or_set - local hpc_changed hpc_slot_changed hpc_update_dir_non_slot hpc_category_dir - local which slots_set_var_name_var_name slot_verminmax_map_var_name_var_name filtered_slots_set_var_name verminmax - local -A hpc_old_filtered_slots_set hpc_new_filtered_slots_set - empty_map_or_set=() - while [[ ${pkg_idx} -lt ${#old_pkgs[@]} ]]; do - old_name=${old_pkgs["${pkg_idx}"]} - new_name=${new_pkgs["${pkg_idx}"]} - if [[ ${old_name} = "${new_name}" ]]; then - info "handling update of ${new_name}" - else - info "handling update of ${new_name} (renamed from ${old_name})" - fi - pkg_debug_enable "${old_name}" "${new_name}" - pkg_debug 'handling updates' - pkg_idx=$((pkg_idx + 1)) - old_repo=${hpc_package_sources_map["${old_name}"]:-} - new_repo=${hpc_package_sources_map["${new_name}"]:-} - if [[ -z ${old_repo} ]]; then - pkg_warn \ - '- package not in old state' \ - " - old package: ${old_name}" \ - " - new package: ${new_name}" - pkg_debug_disable - continue - fi - if [[ -z ${new_repo} ]]; then - pkg_warn \ - '- package not in new state' \ - " - old package: ${old_name}" \ - " - new package: ${new_name}" - pkg_debug_disable - continue - fi - if [[ ${old_repo} != "${new_repo}" ]]; then - # This is pretty much an arbitrary limitation and I don't - # remember any more why we have it. - pkg_warn \ - '- package has moved between repos? unsupported for now' \ - " - old package and repo: ${old_name} ${old_repo}" \ - " - new package and repo: ${new_name} ${new_repo}" - pkg_debug_disable - continue - fi - if [[ ${new_repo} != 'portage-stable' ]]; then - # coreos-overlay packages will need a separate handling - pkg_debug 'not a portage-stable package' - pkg_debug_disable - continue - fi + bunch_of_maps_declare hpc_bunch_of_maps + hpc_bunch_of_maps[BOM_PKG_TO_TAGS_MVM_IDX]=${pkg_to_tags_mvm_var_name} + hpc_bunch_of_maps[BOM_PKG_SLOTS_SET_MVM_IDX]=hpc_pkg_slots_set_mvm + hpc_bunch_of_maps[BOM_OLD_PKG_SLOT_VERMINMAX_MAP_MVM_IDX]=hpc_old_pkg_slot_verminmax_map_mvm + hpc_bunch_of_maps[BOM_NEW_PKG_SLOT_VERMINMAX_MAP_MVM_IDX]=hpc_new_pkg_slot_verminmax_map_mvm + hpc_bunch_of_maps[BOM_PKG_SOURCES_MAP_IDX]=hpc_package_sources_map - mvm_get hpc_pkg_slots_set_mvm "${old_name}" hpc_old_slots_set_var_name - mvm_get hpc_pkg_slots_set_mvm "${new_name}" hpc_new_slots_set_var_name - : "${hpc_old_slots_set_var_name:=empty_map_or_set}" - : "${hpc_new_slots_set_var_name:=empty_map_or_set}" - mvm_get hpc_old_pkg_slot_verminmax_map_mvm "${old_name}" hpc_old_slot_verminmax_map_var_name - mvm_get hpc_new_pkg_slot_verminmax_map_mvm "${new_name}" hpc_new_slot_verminmax_map_var_name - : "${hpc_old_slot_verminmax_map_var_name:=empty_map_or_set}" - : "${hpc_new_slot_verminmax_map_var_name:=empty_map_or_set}" - local -n old_slot_verminmax_map_ref=${hpc_old_slot_verminmax_map_var_name} - local -n new_slot_verminmax_map_ref=${hpc_new_slot_verminmax_map_var_name} - - # Filter out slots for old and new package name that comes out - # without versions. 
This may happen, because we collect all - # slot names for the package name, without differentiating - # whether such a slot existed in the old state or still exists - # in the new state. If slot didn't exist in either one then it - # will come without version information. Such a slot is - # dropped. An example would be an update of sys-devel/binutils - # from 2.42 to 2.43. Each binutils version has a separate slot - # which is named after the version. So the slots set would be - # (2.42 2.43). Slot "2.42" does not exist in the new state any - # more, "2.43" does not yet exist in the old state. So those - # slots for those states will be dropped. Thus filtered slots - # set for the old state will only contain 2.42, while for the - # new state - only 2.43. - for which in old new; do - slots_set_var_name_var_name="hpc_${which}_slots_set_var_name" - slot_verminmax_map_var_name_var_name="hpc_${which}_slot_verminmax_map_var_name" - filtered_slots_set_var_name="hpc_${which}_filtered_slots_set" - local -n which_slots_set_ref=${!slots_set_var_name_var_name} - local -n which_slot_verminmax_map_ref=${!slot_verminmax_map_var_name_var_name} - local -n which_filtered_slots_set_ref=${filtered_slots_set_var_name} - pkg_debug "all unfiltered slots for ${which} name: ${!which_slots_set_ref[*]}" - which_filtered_slots_set_ref=() - for s in "${!which_slots_set_ref[@]}"; do - verminmax=${which_slot_verminmax_map_ref["${s}"]:-} - if [[ -n ${verminmax} ]]; then - which_filtered_slots_set_ref["${s}"]=x - fi - done - pkg_debug "all filtered slots for ${which} name: ${!which_filtered_slots_set_ref[*]}" - unset -n which_filtered_slots_set_ref - unset -n which_slot_verminmax_map_ref - unset -n which_slots_set_ref + # We will be spawning as many jobs below as there are available + # processors/cores. Each job has its own work directory and will + # be receiving packages to process in batches of five. Once all + # the packages are processed, their reports are aggregated into a + # single one. + local -i pkg_batch_size=5 this_batch_size pkg_idx=0 pkg_count=${#old_pkgs[@]} + local -a pkg_batch old_pkgs_batch new_pkgs_batch + + local pkg_job_top_dir="${WORKDIR}/pkgjobdirs" + create_cleanup_dir "${pkg_job_top_dir}" + + local -a pkg_job_state_names=() + local pkg_job_state_name pkg_job_name pkg_job_dir pkg_job_warnings_dir pkg_job_updates_dir + + local -i job_count i + get_num_proc job_count + + local file + local -a paths + + # Set up environment for each job, create a job state and kick off + # the job. 
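+    #
+    # (With, say, job_count=4, the per-job layout prepared below would
+    # be roughly:
+    #
+    #   ${WORKDIR}/pkgjobdirs/j0/{warnings,updates}
+    #   ...
+    #   ${WORKDIR}/pkgjobdirs/j3/{warnings,updates}
+    #
+    # the count of 4 is illustrative - it comes from get_num_proc.)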
+ for ((i = 0; i < job_count; ++i)); do + gen_varname pkg_job_state_name + pkg_job_state_declare "${pkg_job_state_name}" + gen_varname pkg_job_name + job_declare "${pkg_job_name}" + + pkg_job_dir="${pkg_job_top_dir}/j${i}" + pkg_job_warnings_dir="${pkg_job_dir}/warnings" + pkg_job_updates_dir="${pkg_job_dir}/updates" + create_cleanup_dir "${pkg_job_dir}" + create_cleanup_dir "${pkg_job_warnings_dir}" + create_cleanup_dir "${pkg_job_updates_dir}" + paths=() + for file in developer-warnings warnings manual-work-needed; do + paths+=( "${pkg_job_dir}/warnings/${file}" ) + done + for file in summary_stubs changelog_stubs; do + paths+=( "${pkg_job_dir}/updates/${file}" ) done + # TODO: That's a bit messy + add_cleanup "find -P ${pkg_job_updates_dir@Q} -mindepth 1 -maxdepth 1 -type d -exec rm -rf {} +" + add_cleanup "rm -f ${paths[*]@Q}" - hpc_only_old_slots_set=() - hpc_only_new_slots_set=() - hpc_common_slots_set=() - sets_split \ - hpc_old_filtered_slots_set hpc_new_filtered_slots_set \ - hpc_only_old_slots_set hpc_only_new_slots_set hpc_common_slots_set - pkg_debug "all common slots: ${!hpc_common_slots_set[*]}" - pkg_debug "slots only for old name: ${!hpc_only_old_slots_set[*]}" - pkg_debug "slots only for new name: ${!hpc_only_new_slots_set[*]}" - - update_dir_non_slot "${new_name}" hpc_update_dir_non_slot - mkdir -p "${hpc_update_dir_non_slot}" - - generate_non_ebuild_diffs "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_name}" "${new_name}" - generate_full_diffs "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_name}" "${new_name}" - generate_package_mention_reports "${NEW_STATE}" "${old_name}" "${new_name}" - - hpc_changed= - pkg_debug 'going over common slots' - for s in "${!hpc_common_slots_set[@]}"; do - old_verminmax=${old_slot_verminmax_map_ref["${s}"]:-} - new_verminmax=${new_slot_verminmax_map_ref["${s}"]:-} - pkg_debug "slot: ${s}, vmm old: ${old_verminmax}, vmm new: ${new_verminmax}" - if [[ -z "${old_verminmax}" ]] || [[ -z "${new_verminmax}" ]]; then - devel_warn \ - "- no minmax info available for old and/or new:" \ - " - old package: ${old_name}" \ - " - slot: ${s}" \ - " - minmax: ${old_verminmax}" \ - " - new package: ${new_name}" \ - " - slot: ${s}" \ - " - minmax: ${new_verminmax}" - continue + job_run -m "${pkg_job_name}" handle_package_changes_job "${pkg_job_dir}" hpc_bunch_of_maps + + local -n pkg_job_state_ref="${pkg_job_state_name}" + pkg_job_state_ref[PJS_JOB_IDX]=${pkg_job_name} + pkg_job_state_ref[PJS_DIR_IDX]=${pkg_job_dir} + unset -n pkg_job_state_ref + pkg_job_state_names+=( "${pkg_job_state_name}" ) + done + + # We have two job arrays, "current" and "next". When iterating the + # "current" array, we will be putting all still alive jobs into + # the "next" array, which will become "current" in the next + # iteration. In every iteration we collect the output and send + # another batch of packages to be processed by a job if it's + # ready. We terminate the jobs when we have run out of + # packages. The looping finishes when all the jobs are terminated. 
+ local -i current_idx=0 next_idx=1 idx state_count=${#pkg_job_state_names[@]} + local -a pkg_job_state_names_0=( "${pkg_job_state_names[@]}" ) pkg_job_state_names_1=() pkg_job_output_lines + local pkg_job_output_line pkg_job_input_sent + while [[ state_count -gt 0 ]]; do + local -n pkg_job_state_names_ref=pkg_job_state_names_${current_idx} + local -n next_pkg_job_state_names_ref=pkg_job_state_names_${next_idx} + + next_pkg_job_state_names_ref=() + for pkg_job_state_name in "${pkg_job_state_names_ref[@]}"; do + local -n pkg_job_state_ref=${pkg_job_state_name} + pkg_job_name=${pkg_job_state_ref[PJS_JOB_IDX]} + unset -n pkg_job_state_ref + if job_is_alive "${pkg_job_name}"; then + next_pkg_job_state_names_ref+=( "${pkg_job_state_name}" ) fi - update_dir "${new_name}" "${s}" "${s}" hpc_update_dir - mkdir -p "${hpc_update_dir}" - old_version=${old_verminmax%%:*} - new_version=${new_verminmax##*:} - gentoo_ver_cmp_out "${new_version}" "${old_version}" hpc_cmp_result - case ${hpc_cmp_result} in - "${GV_GT}") - handle_pkg_update "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${s}" "${s}" "${old_version}" "${new_version}" - hpc_changed=x - ;; - "${GV_EQ}") - hpc_slot_changed= - handle_pkg_as_is "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${s}" "${s}" "${old_version}" hpc_slot_changed - if [[ -z ${hpc_slot_changed} ]]; then - rm -rf "${hpc_update_dir}" - else - hpc_changed=x - fi - ;; - "${GV_LT}") - handle_pkg_downgrade "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${s}" "${s}" "${old_version}" "${new_version}" - hpc_changed=x - ;; - esac - done - # A "sys-devel/binutils update" case - one old slot and one - # new slot, but different from each other. - if [[ ${#hpc_only_old_slots_set[@]} -eq 1 ]] && [[ ${#hpc_only_new_slots_set[@]} -eq 1 ]]; then - get_first_from_set hpc_only_old_slots_set hpc_old_s - old_verminmax=${old_slot_verminmax_map_ref["${hpc_old_s}"]:-} - get_first_from_set hpc_only_new_slots_set hpc_new_s - new_verminmax=${new_slot_verminmax_map_ref["${hpc_new_s}"]:-} - pkg_debug "jumping from slot ${hpc_old_s} (vmm: ${old_verminmax}) to slot ${hpc_new_s} (vmm: ${new_verminmax})" - if [[ -z "${old_verminmax}" ]] || [[ -z "${new_verminmax}" ]]; then - devel_warn \ - "- no verminmax info available for old and/or new:" \ - " - old package: ${old_name}" \ - " - slot: ${hpc_old_s}" \ - " - minmax: ${old_verminmax}" \ - " - new package: ${new_name}" \ - " - slot: ${hpc_new_s}" \ - " - minmax: ${new_verminmax}" - else - update_dir "${new_name}" "${hpc_old_s}" "${hpc_new_s}" hpc_update_dir - mkdir -p "${hpc_update_dir}" - old_version=${old_verminmax%%:*} - new_version=${new_verminmax##*:} - gentoo_ver_cmp_out "${new_version}" "${old_version}" hpc_cmp_result - case ${hpc_cmp_result} in - "${GV_GT}") - handle_pkg_update "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${hpc_old_s}" "${hpc_new_s}" "${old_version}" "${new_version}" - hpc_changed=x - ;; - "${GV_EQ}") - hpc_slot_changed= - handle_pkg_as_is "${pkg_to_tags_mvm_var_name}" "${old_name}" "${new_name}" "${hpc_old_s}" "${hpc_new_s}" "${old_version}" hpc_slot_changed - if [[ -z ${hpc_slot_changed} ]]; then - rm -rf "${hpc_update_dir}" + job_get_output "${pkg_job_name}" pkg_job_output_lines + pkg_job_input_sent= + for pkg_job_output_line in "${pkg_job_output_lines[@]}"; do + if [[ ${pkg_job_output_line} = "${ready_for_more_msg}" ]]; then + if [[ -z ${pkg_job_input_sent} ]]; then + if [[ pkg_idx -ge pkg_count ]]; then + job_send_input "${pkg_job_name}" "${we_are_done_msg}" else - hpc_changed=x + 
old_pkgs_batch=( "${old_pkgs[@]:pkg_idx:pkg_batch_size}" )
+                            new_pkgs_batch=( "${new_pkgs[@]:pkg_idx:pkg_batch_size}" )
+                            this_batch_size=${#old_pkgs_batch[@]}
+                            pkg_batch=( "${this_batch_size}" )
+                            for ((i = 0; i < this_batch_size; ++i)); do
+                                old_pkg=${old_pkgs_batch[i]}
+                                new_pkg=${new_pkgs_batch[i]}
+                                pkg_batch+=( "${old_pkg} ${new_pkg}" )
+                            done
+                            pkg_idx=$((pkg_idx + pkg_batch_size))
+                            job_send_input "${pkg_job_name}" "${pkg_batch[@]}"
+                        fi
+                        pkg_job_input_sent=x
+                    fi
+                else
+                    # The job already used info to print this line, so
+                    # we should just echo it, otherwise we will get
+                    # repeated prefixes ("script_name: script_name:
+                    # something happened")
+                    echo "${pkg_job_output_line}"
+                fi
+            done
+        done
+        state_count=${#next_pkg_job_state_names_ref[@]}
+        if [[ state_count -gt 0 ]]; then
+            sleep 0.2
+        fi
+
+        unset -n pkg_job_state_names_ref next_pkg_job_state_names_ref
+        idx=${current_idx}
+        current_idx=${next_idx}
+        next_idx=${idx}
+    done
+
+    # All the jobs are done, so here we collect all their reports and
+    # merge them into the main ones in the reports directory.
+    local some_job_failed='' hpc_filename
+    local -i hpc_rv
+    truncate --size=0 "${REPORTS_DIR}/updates/summary_stubs" "${REPORTS_DIR}/updates/changelog_stubs"
+    for pkg_job_state_name in "${pkg_job_state_names[@]}"; do
+        local -n pkg_job_state_ref=${pkg_job_state_name}
+        pkg_job_name=${pkg_job_state_ref[PJS_JOB_IDX]}
+        pkg_job_dir=${pkg_job_state_ref[PJS_DIR_IDX]}
+        unset -n pkg_job_state_ref
+        job_reap "${pkg_job_name}" hpc_rv
+        if [[ hpc_rv -ne 0 ]]; then
+            some_job_failed=x
+        fi
+        for file in "${pkg_job_dir}/warnings/"*; do
+            basename_out "${file}" hpc_filename
+            cat "${file}" >>"${REPORTS_DIR}/${hpc_filename}"
+        done
+        for file in "${pkg_job_dir}/updates/"*; do
+            basename_out "${file}" hpc_filename
+            if [[ -f ${file} ]]; then
+                cat "${file}" >>"${REPORTS_DIR}/updates/${hpc_filename}"
+            elif [[ -d ${file} ]]; then
+                if [[ !
-d "${REPORTS_DIR}/updates/${hpc_filename}" ]]; then + mkdir -p "${REPORTS_DIR}/updates/${hpc_filename}" + fi + mv "${file}/"* "${REPORTS_DIR}/updates/${hpc_filename}" + fi + done done + pkg_job_state_unset "${pkg_job_state_names[@]}" + bunch_of_maps_unset hpc_bunch_of_maps + mvm_unset hpc_new_pkg_slot_verminmax_map_mvm mvm_unset hpc_old_pkg_slot_verminmax_map_mvm mvm_unset hpc_pkg_slots_set_mvm + + if [[ -n ${some_job_failed} ]]; then + fail "some job failed" + fi } # Gets the first item from the passed set. @@ -2133,42 +2681,33 @@ function get_first_from_set() { # # Params: # -# 1 - name of the package tags set mvm variable -# 2 - old package name -# 3 - new package name -# 4 - old package slot -# 5 - new package slot -# 6 - old version -# 7 - new version +# 1 - package output paths variable name +# 2 - name of the package tags set mvm variable +# 3 - old package name +# 4 - new package name +# 5 - old version +# 6 - new version function handle_pkg_update() { - local pkg_to_tags_mvm_var_name old_pkg new_pkg old_s new_s old new - pkg_to_tags_mvm_var_name=${1}; shift - old_pkg=${1}; shift - new_pkg=${1}; shift - old_s=${1}; shift - new_s=${1}; shift - old=${1}; shift - new=${1}; shift + local -n package_output_paths_ref=${1}; shift + local pkg_to_tags_mvm_var_name=${1}; shift + local old_pkg=${1}; shift + local new_pkg=${1}; shift + local old=${1}; shift + local new=${1}; shift # shellcheck source=for-shellcheck/globals source "${WORKDIR}/globals" - local old_no_r new_no_r - old_no_r=${old%-r+([0-9])} - new_no_r=${new%-r+([0-9])} + local old_no_r=${old%-r+([0-9])} + local new_no_r=${new%-r+([0-9])} - local pkg_name - pkg_name=${new_pkg#*/} - local -a lines - lines=( "0:from ${old} to ${new}") + local pkg_name=${new_pkg#*/} + local -a lines=( "0:from ${old} to ${new}" ) if [[ ${old_pkg} != "${new_pkg}" ]]; then lines+=( "0:renamed from ${old_pkg}" ) fi - generate_ebuild_diff "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${old_s}" "${new_s}" "${old}" "${new}" - - local hpu_update_dir hpu_update_dir_non_slot - update_dir_non_slot "${new_pkg}" hpu_update_dir_non_slot - update_dir "${new_pkg}" "${old_s}" "${new_s}" hpu_update_dir + local out_dir=${package_output_paths_ref[POP_PKG_SLOT_OUT_DIR_IDX]} + generate_ebuild_diff "${out_dir}" "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${old}" "${new}" local diff_report_name gen_varname diff_report_name @@ -2182,10 +2721,11 @@ function handle_pkg_update() { unset -n diff_report_ref diff_report_unset "${diff_report_name}" - if [[ -s "${hpu_update_dir}/ebuild.diff" ]]; then + if [[ -s "${out_dir}/ebuild.diff" ]]; then lines+=( '0:TODO: review ebuild.diff' ) fi - if [[ -s "${hpu_update_dir_non_slot}/other.diff" ]]; then + local out_dir_non_slot=${package_output_paths_ref[POP_PKG_OUT_DIR_IDX]} + if [[ -s "${out_dir_non_slot}/other.diff" ]]; then lines+=( '0:TODO: review other.diff' ) fi lines+=( '0:TODO: review occurences' ) @@ -2196,13 +2736,15 @@ function handle_pkg_update() { local -a hpu_tags tags_for_pkg "${pkg_to_tags_mvm_var_name}" "${new_pkg}" hpu_tags + local top_out_dir=${package_output_paths_ref[POP_OUT_DIR_IDX]} + if ver_test "${new_no_r}" -gt "${old_no_r}"; then # version bump - generate_changelog_entry_stub "${pkg_name}" "${new_no_r}" "${hpu_tags[@]}" + generate_changelog_entry_stub "${top_out_dir}" "${pkg_name}" "${new_no_r}" "${hpu_tags[@]}" lines+=( '0:release notes: TODO' ) fi - generate_summary_stub "${new_pkg}" "${hpu_tags[@]}" -- "${lines[@]}" + generate_summary_stub 
"${top_out_dir}" "${new_pkg}" "${hpu_tags[@]}" -- "${lines[@]}" } # Write information to reports directory about the modified package @@ -2211,48 +2753,38 @@ function handle_pkg_update() { # # Params: # -# 1 - name of the package tags set mvm variable -# 2 - old package name -# 3 - new package name -# 4 - old package slot -# 5 - new package slot -# 6 - version -# 7 - name of a "bool" variable where info is stored if relevant files +# 1 - package output paths variable name +# 2 - name of the package tags set mvm variable +# 3 - old package name +# 4 - new package name +# 5 - version +# 6 - name of a "bool" variable where info is stored if relevant files # has changed (empty means nothing changed, non-empty means # something has changed) function handle_pkg_as_is() { - local pkg_to_tags_mvm_var_name old_pkg new_pkg old_s new_s v - pkg_to_tags_mvm_var_name=${1}; shift - old_pkg=${1}; shift - new_pkg=${1}; shift - old_s=${1}; shift - new_s=${1}; shift - v=${1}; shift + local -n package_output_paths_ref=${1}; shift + local pkg_to_tags_mvm_var_name=${1}; shift + local old_pkg=${1}; shift + local new_pkg=${1}; shift + local v=${1}; shift local -n changed_ref=${1}; shift # shellcheck source=for-shellcheck/globals source "${WORKDIR}/globals" - local hpai_update_dir - update_dir "${new_pkg}" "${old_s}" "${new_s}" hpai_update_dir - - local pkg_name - pkg_name=${new_pkg#/} - local -a lines - lines=( "0:still at ${v}" ) + local pkg_name=${new_pkg#/} + local -a lines=( "0:still at ${v}" ) - local renamed - renamed= + local renamed='' if [[ ${old_pkg} != "${new_pkg}" ]]; then lines+=( "0:renamed from ${old_pkg}" ) renamed=x fi - generate_ebuild_diff "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${old_s}" "${new_s}" "${v}" "${v}" - local hpai_update_dir_non_slot hpai_update_dir - update_dir_non_slot "${new_pkg}" hpai_update_dir_non_slot - update_dir "${new_pkg}" "${old_s}" "${new_s}" hpai_update_dir - local modified - modified= + + local out_dir=${package_output_paths_ref[POP_PKG_SLOT_OUT_DIR_IDX]} + generate_ebuild_diff "${out_dir}" "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${v}" "${v}" + + local modified='' local diff_report_name gen_varname diff_report_name @@ -2269,11 +2801,12 @@ function handle_pkg_as_is() { unset -n diff_report_ref diff_report_unset "${diff_report_name}" - if [[ -s "${hpai_update_dir}/ebuild.diff" ]]; then + if [[ -s "${out_dir}/ebuild.diff" ]]; then lines+=( '0:TODO: review ebuild.diff' ) modified=x fi - if [[ -s "${hpai_update_dir_non_slot}/other.diff" ]]; then + local out_dir_non_slot=${package_output_paths_ref[POP_PKG_OUT_DIR_IDX]} + if [[ -s "${out_dir_non_slot}/other.diff" ]]; then lines+=( '0:TODO: review other.diff' ) modified=x fi @@ -2287,9 +2820,11 @@ function handle_pkg_as_is() { lines+=( '0:TODO: review occurences-for-old-name' ) fi + local top_out_dir=${package_output_paths_ref[POP_OUT_DIR_IDX]} + local -a hpai_tags tags_for_pkg "${pkg_to_tags_mvm_var_name}" "${new_pkg}" hpai_tags - generate_summary_stub "${new_pkg}" "${hpai_tags[@]}" -- "${lines[@]}" + generate_summary_stub "${top_out_dir}" "${new_pkg}" "${hpai_tags[@]}" -- "${lines[@]}" } # Write information to reports directory about the package downgrade @@ -2298,42 +2833,34 @@ function handle_pkg_as_is() { # # Params: # -# 1 - name of the package tags set mvm variable -# 2 - old package name -# 3 - new package name -# 4 - old package slot -# 5 - new package slot -# 6 - old version -# 7 - new version +# 1 - package output paths variable name +# 2 - name 
 # Write information to the reports directory about the modified package
@@ -2211,48 +2753,38 @@ function handle_pkg_update() {
 #
 # Params:
 #
-# 1 - name of the package tags set mvm variable
-# 2 - old package name
-# 3 - new package name
-# 4 - old package slot
-# 5 - new package slot
-# 6 - version
-# 7 - name of a "bool" variable where info is stored if relevant files
+# 1 - package output paths variable name
+# 2 - name of the package tags set mvm variable
+# 3 - old package name
+# 4 - new package name
+# 5 - version
+# 6 - name of a "bool" variable where info is stored if relevant files
 #     have changed (empty means nothing changed, non-empty means
 #     something has changed)
 function handle_pkg_as_is() {
-    local pkg_to_tags_mvm_var_name old_pkg new_pkg old_s new_s v
-    pkg_to_tags_mvm_var_name=${1}; shift
-    old_pkg=${1}; shift
-    new_pkg=${1}; shift
-    old_s=${1}; shift
-    new_s=${1}; shift
-    v=${1}; shift
+    local -n package_output_paths_ref=${1}; shift
+    local pkg_to_tags_mvm_var_name=${1}; shift
+    local old_pkg=${1}; shift
+    local new_pkg=${1}; shift
+    local v=${1}; shift
     local -n changed_ref=${1}; shift
 
     # shellcheck source=for-shellcheck/globals
     source "${WORKDIR}/globals"
 
-    local hpai_update_dir
-    update_dir "${new_pkg}" "${old_s}" "${new_s}" hpai_update_dir
-
-    local pkg_name
-    pkg_name=${new_pkg#/}
-    local -a lines
-    lines=( "0:still at ${v}" )
+    local pkg_name=${new_pkg#*/}
+    local -a lines=( "0:still at ${v}" )
 
-    local renamed
-    renamed=
+    local renamed=''
     if [[ ${old_pkg} != "${new_pkg}" ]]; then
         lines+=( "0:renamed from ${old_pkg}" )
         renamed=x
     fi
 
-    generate_ebuild_diff "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${old_s}" "${new_s}" "${v}" "${v}"
-    local hpai_update_dir_non_slot hpai_update_dir
-    update_dir_non_slot "${new_pkg}" hpai_update_dir_non_slot
-    update_dir "${new_pkg}" "${old_s}" "${new_s}" hpai_update_dir
-    local modified
-    modified=
+
+    local out_dir=${package_output_paths_ref[POP_PKG_SLOT_OUT_DIR_IDX]}
+    generate_ebuild_diff "${out_dir}" "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${v}" "${v}"
+
+    local modified=''
     local diff_report_name
     gen_varname diff_report_name
@@ -2269,11 +2801,12 @@ function handle_pkg_as_is() {
     unset -n diff_report_ref
     diff_report_unset "${diff_report_name}"
 
-    if [[ -s "${hpai_update_dir}/ebuild.diff" ]]; then
+    if [[ -s "${out_dir}/ebuild.diff" ]]; then
         lines+=( '0:TODO: review ebuild.diff' )
         modified=x
     fi
-    if [[ -s "${hpai_update_dir_non_slot}/other.diff" ]]; then
+    local out_dir_non_slot=${package_output_paths_ref[POP_PKG_OUT_DIR_IDX]}
+    if [[ -s "${out_dir_non_slot}/other.diff" ]]; then
         lines+=( '0:TODO: review other.diff' )
         modified=x
     fi
@@ -2287,9 +2820,11 @@ function handle_pkg_as_is() {
         lines+=( '0:TODO: review occurences-for-old-name' )
     fi
 
+    local top_out_dir=${package_output_paths_ref[POP_OUT_DIR_IDX]}
+
     local -a hpai_tags
     tags_for_pkg "${pkg_to_tags_mvm_var_name}" "${new_pkg}" hpai_tags
-    generate_summary_stub "${new_pkg}" "${hpai_tags[@]}" -- "${lines[@]}"
+    generate_summary_stub "${top_out_dir}" "${new_pkg}" "${hpai_tags[@]}" -- "${lines[@]}"
 }
 
 # Write information to the reports directory about the package downgrade
@@ -2298,42 +2833,34 @@ function handle_pkg_as_is() {
 #
 # Params:
 #
-# 1 - name of the package tags set mvm variable
-# 2 - old package name
-# 3 - new package name
-# 4 - old package slot
-# 5 - new package slot
-# 6 - old version
-# 7 - new version
+# 1 - package output paths variable name
+# 2 - name of the package tags set mvm variable
+# 3 - old package name
+# 4 - new package name
+# 5 - old version
+# 6 - new version
 function handle_pkg_downgrade() {
-    local pkg_to_tags_mvm_var_name old_pkg new_pkg old_s new_s old new
-    pkg_to_tags_mvm_var_name=${1}; shift
-    old_pkg=${1}; shift
-    new_pkg=${1}; shift
-    old_s=${1}; shift
-    new_s=${1}; shift
-    old=${1}; shift
-    new=${1}; shift
+    local -n package_output_paths_ref=${1}; shift
+    local pkg_to_tags_mvm_var_name=${1}; shift
+    local old_pkg=${1}; shift
+    local new_pkg=${1}; shift
+    local old=${1}; shift
+    local new=${1}; shift
 
     # shellcheck source=for-shellcheck/globals
     source "${WORKDIR}/globals"
 
-    local old_no_r new_no_r
-    old_no_r=${old%-r+([0-9])}
-    new_no_r=${new%-r+([0-9])}
+    local old_no_r=${old%-r+([0-9])}
+    local new_no_r=${new%-r+([0-9])}
 
-    local pkg_name
-    pkg_name=${new_pkg#*/}
-    local -a lines
-    lines=( "0:downgraded from ${old} to ${new}" )
+    local pkg_name=${new_pkg#*/}
+    local -a lines=( "0:downgraded from ${old} to ${new}" )
 
     if [[ ${old_pkg} != "${new_pkg}" ]]; then
         lines+=( "0:renamed from ${old_pkg}" )
     fi
 
-    generate_ebuild_diff "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${old_s}" "${new_s}" "${old}" "${new}"
-    local hpd_update_dir hpd_update_dir_non_slot
-    update_dir_non_slot "${new_pkg}" hpd_update_dir_non_slot
-    update_dir "${new_pkg}" "${old_s}" "${new_s}" hpd_update_dir
+    local out_dir=${package_output_paths_ref[POP_PKG_SLOT_OUT_DIR_IDX]}
+    generate_ebuild_diff "${out_dir}" "${OLD_PORTAGE_STABLE}" "${NEW_PORTAGE_STABLE}" "${old_pkg}" "${new_pkg}" "${old}" "${new}"
 
     local diff_report_name
     gen_varname diff_report_name
@@ -2347,10 +2874,11 @@ function handle_pkg_downgrade() {
     unset -n diff_report_ref
     diff_report_unset "${diff_report_name}"
 
-    if [[ -s "${hpd_update_dir}/ebuild.diff" ]]; then
+    if [[ -s "${out_dir}/ebuild.diff" ]]; then
         lines+=( '0:TODO: review ebuild.diff' )
     fi
-    if [[ -s "${hpd_update_dir_non_slot}/other.diff" ]]; then
+    local out_dir_non_slot=${package_output_paths_ref[POP_PKG_OUT_DIR_IDX]}
+    if [[ -s "${out_dir_non_slot}/other.diff" ]]; then
         lines+=( '0:TODO: review other.diff' )
     fi
     lines+=( '0:TODO: review occurences' )
@@ -2361,13 +2889,15 @@ function handle_pkg_downgrade() {
     local -a hpd_tags
     tags_for_pkg "${pkg_to_tags_mvm_var_name}" "${new_pkg}" hpd_tags
 
+    local top_out_dir=${package_output_paths_ref[POP_OUT_DIR_IDX]}
+
     if ver_test "${new_no_r}" -lt "${old_no_r}"; then
         # version downgrade
-        generate_changelog_entry_stub "${pkg_name}" "${new_no_r}" "${hpd_tags[@]}"
+        generate_changelog_entry_stub "${top_out_dir}" "${pkg_name}" "${new_no_r}" "${hpd_tags[@]}"
         lines+=( "0:release notes: TODO" )
     fi
 
-    generate_summary_stub "${new_pkg}" "${hpd_tags[@]}" -- "${lines[@]}"
+    generate_summary_stub "${top_out_dir}" "${new_pkg}" "${hpd_tags[@]}" -- "${lines[@]}"
 }
 
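All three handlers above call generate_summary_stub with a list of tags, a literal --, and then the summary lines; generate_summary_stub below splits its argument list at the first --. A self-contained sketch of the same convention (the demo function name is made up):

function demo_split_args() {
    local -a tags=() lines=()
    # everything up to the first '--' is a tag
    while [[ ${#} -gt 0 ]]; do
        if [[ ${1} = '--' ]]; then
            shift
            break
        fi
        tags+=( "${1}" )
        shift
    done
    # whatever remains after '--' are the lines
    lines=( "${@}" )
    printf 'tag: %s\n' "${tags[@]}"
    printf 'line: %s\n' "${lines[@]}"
}

demo_split_args SDK BOARD -- '0:from 1.0 to 1.1' '0:TODO: review ebuild.diff'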
 # Retrieves tags for a package.
@@ -2402,13 +2932,14 @@ function tags_for_pkg() {
 # Adds a changelog stub to the changelog file in the reports directory.
 #
 # Params:
-# 1 - package name (shortened, without the category)
-# 2 - version
+# 1 - output directory
+# 2 - package name (shortened, without the category)
+# 3 - version
 # @ - package tags
 function generate_changelog_entry_stub() {
-    local pkg_name v
-    pkg_name=${1}; shift
-    v=${1}; shift
+    local out_dir=${1}; shift
+    local pkg_name=${1}; shift
+    local v=${1}; shift
     # rest are tags
 
     local -a applied_tags=()
@@ -2431,28 +2962,22 @@ function generate_changelog_entry_stub() {
         gces_tags='SDK'
     fi
 
-    # shellcheck source=for-shellcheck/globals
-    source "${WORKDIR}/globals"
-
-    printf '%s %s: %s ([%s](TODO))\n' '-' "${gces_tags}" "${pkg_name}" "${v}" >>"${REPORTS_DIR}/updates/changelog_stubs"
+    printf '%s %s: %s ([%s](TODO))\n' '-' "${gces_tags}" "${pkg_name}" "${v}" >>"${out_dir}/changelog_stubs"
 }
 
 # Adds a stub to the summary file in the reports directory.
 #
 # Params:
-# 1 - package
+# 1 - output directory
+# 2 - package
 # @ - tags followed by double dash followed by lines to append to the
 #     file
 function generate_summary_stub() {
-    local pkg
-    pkg=${1}; shift
+    local out_dir=${1}; shift
+    local pkg=${1}; shift
     # rest are tags followed by a double dash followed by lines
 
-    # shellcheck source=for-shellcheck/globals
-    source "${WORKDIR}/globals"
-
-    local -a tags
-    tags=()
+    local -a tags=()
     while [[ ${#} -gt 0 ]]; do
         if [[ ${1} = '--' ]]; then
             shift
@@ -2479,7 +3004,7 @@ function generate_summary_stub() {
             printf ' - %s\n' "${line}"
         done
         printf '\n'
-    } >>"${REPORTS_DIR}/updates/summary_stubs"
+    } >>"${out_dir}/summary_stubs"
 }
 
 # Generate diffs between directories in old state and new state for a
@@ -2487,30 +3012,27 @@ function generate_summary_stub() {
 #
 # Params:
 #
-# 1 - path to portage-stable in old state
-# 2 - path to portage-stable in new state
-# 3 - old package name
-# 4 - new package name
+# 1 - output directory
+# 2 - path to portage-stable in old state
+# 3 - path to portage-stable in new state
+# 4 - old package name
+# 5 - new package name
 function generate_full_diffs() {
-    local old_ps new_ps old_pkg new_pkg
-    old_ps=${1}; shift
-    new_ps=${1}; shift
-    old_pkg=${1}; shift
-    new_pkg=${1}; shift
-
-    local old_path new_path
-    old_path="${old_ps}/${old_pkg}"
-    new_path="${new_ps}/${new_pkg}"
+    local out_dir=${1}; shift
+    local old_ps=${1}; shift
+    local new_ps=${1}; shift
+    local old_pkg=${1}; shift
+    local new_pkg=${1}; shift
 
-    local gfd_update_dir
-    update_dir_non_slot "${new_pkg}" gfd_update_dir
+    local old_path="${old_ps}/${old_pkg}"
+    local new_path="${new_ps}/${new_pkg}"
 
     local -a common_diff_opts=(
         --recursive
         --unified=3
     )
 
-    xdiff "${common_diff_opts[@]}" --new-file "${old_path}" "${new_path}" >"${gfd_update_dir}/full.diff"
-    xdiff "${common_diff_opts[@]}" --brief "${old_path}" "${new_path}" >"${gfd_update_dir}/brief-summary"
+    xdiff "${common_diff_opts[@]}" --new-file "${old_path}" "${new_path}" >"${out_dir}/full.diff"
+    xdiff "${common_diff_opts[@]}" --brief "${old_path}" "${new_path}" >"${out_dir}/brief-summary"
 }
 
 # Generate a diff between non-ebuild, non-Manifest files for old and
@@ -2518,23 +3040,20 @@ function generate_full_diffs() {
 #
 # Params:
 #
-# 1 - path to portage-stable in old state
-# 2 - path to portage-stable in new state
-# 3 - old package name
-# 4 - new package name
+# 1 - output directory
+# 2 - path to portage-stable in old state
+# 3 - path to portage-stable in new state
+# 4 - old package name
+# 5 - new package name
 function generate_non_ebuild_diffs() {
-    local old_ps new_ps old_pkg new_pkg
-    old_ps=${1}; shift
-    new_ps=${1}; shift
-    old_pkg=${1}; shift
-    new_pkg=${1}; shift
-
-    local old_path new_path
-    old_path="${old_ps}/${old_pkg}"
-    new_path="${new_ps}/${new_pkg}"
+    local out_dir=${1}; shift
+    local old_ps=${1}; shift
+    local new_ps=${1}; shift
+    local old_pkg=${1}; shift
+    local new_pkg=${1}; shift
 
-    local gned_update_dir
-    update_dir_non_slot "${new_pkg}" gned_update_dir
+    local old_path="${old_ps}/${old_pkg}"
+    local new_path="${new_ps}/${new_pkg}"
 
     local -a diff_opts=(
         --recursive
@@ -2545,43 +3064,36 @@ function generate_non_ebuild_diffs() {
         --exclude='*.ebuild'
        --exclude='Manifest'
     )
-    xdiff "${diff_opts[@]}" "${old_path}" "${new_path}" >"${gned_update_dir}/other.diff"
+    xdiff "${diff_opts[@]}" "${old_path}" "${new_path}" >"${out_dir}/other.diff"
 }
 
 # Generate a diff between specific ebuilds for old and new package.
 #
 # Params:
 #
-# 1 - path to portage-stable in old state
-# 2 - path to portage-stable in new state
-# 3 - old package name
-# 4 - new package name
-# 5 - old package slot
-# 6 - new package slot
-# 7 - old package version
-# 8 - new package version
+# 1 - output directory
+# 2 - path to portage-stable in old state
+# 3 - path to portage-stable in new state
+# 4 - old package name
+# 5 - new package name
+# 6 - old package version
+# 7 - new package version
 function generate_ebuild_diff() {
-    local old_ps new_ps old_pkg new_pkg old_s new_s old new
-    old_ps=${1}; shift
-    new_ps=${1}; shift
-    old_pkg=${1}; shift
-    new_pkg=${1}; shift
-    old_s=${1}; shift
-    new_s=${1}; shift
-    old=${1}; shift
-    new=${1}; shift
+    local out_dir=${1}; shift
+    local old_ps=${1}; shift
+    local new_ps=${1}; shift
+    local old_pkg=${1}; shift
+    local new_pkg=${1}; shift
+    local old=${1}; shift
+    local new=${1}; shift
 
-    local old_pkg_name new_pkg_name
-    old_pkg_name=${old_pkg#*/}
-    new_pkg_name=${new_pkg#*/}
+    local old_pkg_name=${old_pkg#*/}
+    local new_pkg_name=${new_pkg#*/}
 
-    local old_path new_path
-    old_path="${old_ps}/${old_pkg}/${old_pkg_name}-${old}.ebuild"
-    new_path="${new_ps}/${new_pkg}/${new_pkg_name}-${new}.ebuild"
+    local old_path="${old_ps}/${old_pkg}/${old_pkg_name}-${old}.ebuild"
+    local new_path="${new_ps}/${new_pkg}/${new_pkg_name}-${new}.ebuild"
 
-    local ged_update_dir
-    update_dir "${new_pkg}" "${old_s}" "${new_s}" ged_update_dir
-    xdiff --unified=3 "${old_path}" "${new_path}" >"${ged_update_dir}/ebuild.diff"
+    xdiff --unified=3 "${old_path}" "${new_path}" >"${out_dir}/ebuild.diff"
 }
 
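generate_ebuild_diff above derives the ebuild paths from the qualified package names: ${pkg#*/} drops the category, and the ebuild inside the package directory is named <name>-<version>.ebuild. For example (package and version are illustrative):

pkg='dev-lang/yasm'
ver='1.3.0'
pkg_name=${pkg#*/}                        # -> yasm
echo "${pkg}/${pkg_name}-${ver}.ebuild"   # -> dev-lang/yasm/yasm-1.3.0.ebuild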
 function generate_cache_diff_report() {
@@ -2615,22 +3127,20 @@ function generate_cache_diff_report() {
 # are mentioned in the entire scripts repository. May result in two
 # separate reports if the package got renamed.
 #
-# 1 - path to scripts repo
-# 2 - old package name
-# 3 - new package name
+# 1 - output directory
+# 2 - path to scripts repo
+# 3 - old package name
+# 4 - new package name
 function generate_package_mention_reports() {
-    local scripts old_pkg new_pkg
-    scripts=${1}; shift
-    old_pkg=${1}; shift
-    new_pkg=${1}; shift
-
-    local gpr_update_dir
-    update_dir_non_slot "${new_pkg}" gpr_update_dir
+    local out_dir=${1}; shift
+    local scripts=${1}; shift
+    local old_pkg=${1}; shift
+    local new_pkg=${1}; shift
 
-    generate_mention_report_for_package "${scripts}" "${new_pkg}" >"${gpr_update_dir}/occurences"
+    generate_mention_report_for_package "${scripts}" "${new_pkg}" >"${out_dir}/occurences"
     if [[ ${old_pkg} != "${new_pkg}" ]]; then
-        generate_mention_report_for_package "${scripts}" "${old_pkg}" >"${gpr_update_dir}/occurences-for-old-name"
+        generate_mention_report_for_package "${scripts}" "${old_pkg}" >"${out_dir}/occurences-for-old-name"
     fi
 }
 
@@ -2681,39 +3191,17 @@ function generate_mention_report_for_package() {
     grep_pkg "${scripts}" "${pkg}" ":(exclude)${ps}" ":(exclude)${co}"
 }
 
-# Gets a toplevel update reports directory for a package. This is
-# where occurences and non-ebuild diffs are stored.
+# Gets a slot-specific directory name for ebuild diffs.
 #
 # Params:
 #
-# 1 - package name
-# 2 - name of a variable where the path will be stored
-function update_dir_non_slot() {
-    local pkg
-    pkg=${1}; shift
-    local -n dir_ref=${1}; shift
-
-    # shellcheck source=for-shellcheck/globals
-    source "${WORKDIR}/globals"
-
-    dir_ref="${REPORTS_DIR}/updates/${pkg}"
-}
-
-# Gets a slot specific update reports directory for a package. This is
-# where ebuild diffs are stored.
-#
-# Params:
-#
-# 1 - package name
-# 2 - old slot
-# 3 - new slot
-# 4 - name of a variable where the path will be stored
-function update_dir() {
-    local pkg old_s new_s
-    pkg=${1}; shift
-    old_s=${1}; shift
-    new_s=${1}; shift
-    local -n dir_ref=${1}; shift
+# 1 - old slot
+# 2 - new slot
+# 3 - name of a variable where the directory name will be stored
+function slot_dirname() {
+    local old_s=${1}; shift
+    local new_s=${1}; shift
+    local -n dirname_ref=${1}; shift
 
     # slots may have slashes in them - replace them with "-slash-"
     local slot_dir
@@ -2723,9 +3211,7 @@ function update_dir() {
         slot_dir="${old_s//\//-slash-}-to-${new_s//\//-slash-}"
     fi
 
-    local ud_non_slot_dir
-    update_dir_non_slot "${pkg}" ud_non_slot_dir
-    dir_ref="${ud_non_slot_dir}/${slot_dir}"
+    dirname_ref=${slot_dir}
 }
 
 # Greps for a package name in selected directories of the passed
@@ -2766,17 +3252,19 @@ function handle_gentoo_sync() {
     mvm_declare hgs_pkg_to_tags_mvm
     process_listings hgs_pkg_to_tags_mvm
 
+    # shellcheck source=for-shellcheck/globals
+    source "${WORKDIR}/globals"
+
     local -A hgs_renames_old_to_new_map=()
     process_profile_updates_directory hgs_renames_old_to_new_map
 
+    mkdir -p "${REPORTS_DIR}/updates"
+
     handle_package_changes hgs_renames_old_to_new_map hgs_pkg_to_tags_mvm
 
     mvm_unset hgs_pkg_to_tags_mvm
     #mvm_debug_disable hgs_pkg_to_tags_mvm
 
-    # shellcheck source=for-shellcheck/globals
-    source "${WORKDIR}/globals"
-
     local old_head new_head
     old_head=$(git -C "${OLD_STATE}" rev-parse HEAD)
     new_head=$(git -C "${NEW_STATE}" rev-parse HEAD)
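slot_dirname above flattens a pair of slots into a single directory component; slots like 0/3.8 contain slashes, which would otherwise introduce extra directory levels. For example:

old_s='0/3.8'
new_s='0/3.9'
echo "${old_s//\//-slash-}-to-${new_s//\//-slash-}"   # -> 0-slash-3.8-to-0-slash-3.9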
@@ -2972,6 +3460,8 @@ function handle_eclass() {
     # shellcheck source=for-shellcheck/globals
     source "${WORKDIR}/globals"
 
+    info "handling update of ${eclass}"
+
     local -a lines
     lines=()
     if [[ -e "${OLD_PORTAGE_STABLE}/${eclass}" ]] && [[ -e "${NEW_PORTAGE_STABLE}/${eclass}" ]]; then
     else
         lines+=( '0:added from Gentoo' )
     fi
-    generate_summary_stub "${eclass}" -- "${lines[@]}"
+    generate_summary_stub "${REPORTS_DIR}/updates" "${eclass}" -- "${lines[@]}"
 }
 
 # Handle profile changes. Generates three different diffs - changes in
@@ -2994,6 +3484,8 @@ function handle_profiles() {
     # shellcheck source=for-shellcheck/globals
     source "${WORKDIR}/globals"
 
+    info "handling update of profiles"
+
     local -a files=()
     local which arch
     for which in "${WHICH[@]}"; do
@@ -3055,7 +3547,7 @@ function handle_profiles() {
     done <"${out_dir}/full.diff"
     lines_to_file_truncate "${out_dir}/relevant.diff" "${relevant_lines[@]}"
     lines_to_file_truncate "${out_dir}/possibly-irrelevant-files" "${possibly_irrelevant_files[@]}"
-    generate_summary_stub profiles -- '0:TODO: review the diffs'
+    generate_summary_stub "${REPORTS_DIR}/updates" profiles -- '0:TODO: review the diffs'
 }
 
 # Handles changes in license directory. Generates brief reports and
@@ -3064,6 +3556,8 @@ function handle_licenses() {
     # shellcheck source=for-shellcheck/globals
     source "${WORKDIR}/globals"
 
+    info "handling update of licenses"
+
     local -a dropped=() added=() changed=()
     local line hl_stripped
     # Lines are:
@@ -3129,7 +3623,7 @@ function handle_licenses() {
         join_by joined ', ' "${changed[@]}"
         lines+=( "0:updated ${joined}" )
     fi
-    generate_summary_stub licenses -- "${lines[@]}"
+    generate_summary_stub "${REPORTS_DIR}/updates" licenses -- "${lines[@]}"
 }
 
 # Generates reports about changes inside the scripts directory.
@@ -3137,12 +3631,14 @@ function handle_scripts() {
     # shellcheck source=for-shellcheck/globals
     source "${WORKDIR}/globals"
 
+    info "handling update of scripts"
+
     local out_dir
     out_dir="${REPORTS_DIR}/updates/scripts"
     mkdir -p "${out_dir}"
 
     xdiff --unified=3 --recursive "${OLD_PORTAGE_STABLE}/scripts" "${NEW_PORTAGE_STABLE}/scripts" >"${out_dir}/scripts.diff"
-    generate_summary_stub scripts -- '0:TODO: review the diffs'
+    generate_summary_stub "${REPORTS_DIR}/updates" scripts -- '0:TODO: review the diffs'
 }
 
 fi
diff --git a/pkg_auto/impl/util.sh b/pkg_auto/impl/util.sh
index 267b550d111..2f1b0d4d324 100644
--- a/pkg_auto/impl/util.sh
+++ b/pkg_auto/impl/util.sh
@@ -333,4 +333,20 @@ function struct_declare() {
     declare "${args[@]}" "${@}"
 }
 
+function get_num_proc() {
+    local -n num_proc_ref=${1}; shift
+    local -i num_proc=1 rv=1
+
+    # stolen from portage
+    [[ rv -eq 0 ]] || { rv=0; num_proc=$(getconf _NPROCESSORS_ONLN 2>/dev/null) || rv=1; }
+    [[ rv -eq 0 ]] || { rv=0; num_proc=$(sysctl -n hw.ncpu 2>/dev/null) || rv=1; }
+    # stolen from my head
+    [[ rv -eq 0 ]] || { rv=0; num_proc=$(nproc 2>/dev/null) || rv=1; }
+    # stolen from common.sh
+    [[ rv -eq 0 ]] || { rv=0; num_proc=$(grep -c "^processor" /proc/cpuinfo 2>/dev/null) || rv=1; }
+    [[ rv -eq 0 ]] || { rv=0; num_proc=1; }
+
+    num_proc_ref=${num_proc}
+}
+
 fi
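A short usage sketch for the relocated get_num_proc (the caller-side names and source path are illustrative): it tries getconf, sysctl, nproc and /proc/cpuinfo in turn, falls back to 1, and stores the result in the named variable, which can then be used to size a pool of jobs.

source pkg_auto/impl/util.sh   # assumed location of the helper

declare -i num_jobs
get_num_proc num_jobs
echo "running with ${num_jobs} parallel jobs"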