[PATCH 00/16] pickman: Support monitoring and fixing pipeline failures
From: Simon Glass <simon.glass@canonical.com> This series contains three groups of improvements to pickman: Refactoring (patches 1-8): Clean up the codebase by splitting large functions into smaller helpers, adding test coverage for prepare_apply(), extracting a run_agent_collect() helper to remove duplicated agent-streaming code, and switching to tools.read/write_file() for file I/O. Subtree handling (patches 9-10): Teach pickman to detect and handle dts/upstream subtree merges, and process subtree updates even when the maximum number of MRs is reached. Pipeline fixes (patches 11-16): Add infrastructure and logic for automatically diagnosing and fixing CI pipeline failures using a Claude agent, including GitLab API helpers, database tracking, null-byte stripping in job logs, and a retry/comment system. Simon Glass (16): pickman: Drop unnecessary f-string prefixes in do_rewind() pickman: Refactor do_rewind() into smaller helpers pickman: Refactor do_next_merges() into smaller helpers pickman: Refactor decompose_mega_merge() into smaller helpers pickman: Refactor handle_already_applied() into smaller helpers pickman: Add tests for prepare_apply() pickman: Refactor the initial part of prepare_apply() pickman: Handle dts/upstream subtree merges automatically pickman: Create a function to run an agent pickman: Use tools for file I/O pickman: Use tools for file I/O in tests pickman: Add a database table to track pipeline fixes pickman: Add pipeline helpers to gitlab_api pickman: Support automatically fixing pipeline failures pickman: Process subtree updates even at max MRs pickman: Strip null bytes from CI job logs tools/pickman/README.rst | 81 ++ tools/pickman/__main__.py | 6 + tools/pickman/agent.py | 237 ++++- tools/pickman/control.py | 972 +++++++++++++++----- tools/pickman/database.py | 65 +- tools/pickman/ftest.py | 1661 ++++++++++++++++++++++++++++++++--- tools/pickman/gitlab_api.py | 78 +- 7 files changed, 2744 insertions(+), 356 deletions(-) -- 2.43.0 
base-commit: c42396807e2c92a1f87b9d671e2e09c7bf13caf2 branch: picke
From: Simon Glass <simon.glass@canonical.com> Three tout.info() calls in do_rewind() use f-strings that contain no interpolated variables, triggering pylint W1309. Remove the f prefix from these plain strings. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/control.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 8f7568e47bf..40bcaaef120 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -1952,18 +1952,18 @@ def do_rewind(args, dbs): tout.info(f"{prefix}Rewind '{source}': " f'{current_short} -> {target_chash}') tout.info(f' Target: {target_chash} {target_subject}') - tout.info(f' Merges being rewound:') + tout.info(' Merges being rewound:') for i in range(target_idx): tout.info(f' {merges[i][1]} {merges[i][2]}') tout.info(f' Commits to delete from database: {len(db_commits)}') if matched_mrs: - tout.info(f' MRs to delete on GitLab:') + tout.info(' MRs to delete on GitLab:') for merge_req in matched_mrs: tout.info(f' !{merge_req.iid}: {merge_req.title}') tout.info(f' {merge_req.web_url}') elif mr_branches: - tout.info(f' Branches to check for MRs:') + tout.info(' Branches to check for MRs:') for branch in mr_branches: tout.info(f' {branch}') -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> do_rewind() is ~145 lines with 8 distinct sequential phases, making it hard to follow. Extract five private helpers that each handle one phase: - _rewind_fetch_merges(): fetch merge history and locate the target - _rewind_get_range_commits(): list commits in the range and filter to those in the database - _rewind_find_branches(): match cherry-pick branches against the range - _rewind_find_mrs(): look up MR details for matching branches - _rewind_show_summary(): display the dry-run / execution summary This reduces do_rewind() to ~40 lines of sequential calls with clear control flow, while each helper is independently understandable. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/control.py | 196 +++++++++++++++++++++++++-------------- 1 file changed, 124 insertions(+), 72 deletions(-) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 40bcaaef120..868e03df8c9 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -1836,33 +1836,13 @@ def do_commit_source(args, dbs): return 0 -def do_rewind(args, dbs): - """Rewind the source position back by N merges - - By default performs a dry run, showing what would happen. Use --force - to actually execute the rewind. - - Walks back N merges on the first-parent chain from the current source - position, deletes the commits in that range from the database, and - resets the source to the earlier position. - - Args: - args (Namespace): Parsed arguments with 'source', 'count', 'force' - dbs (Database): Database instance +def _rewind_fetch_merges(current, count): + """Fetch first-parent merges and find target index. 
Returns: - int: 0 on success, 1 on failure + tuple: (merges, target_idx) where merges is a list of + (hash, short_hash, subject) tuples, or None on error """ - source = args.source - count = args.count - force = args.force - - current = dbs.source_get(source) - if not current: - tout.error(f"Source '{source}' not found in database") - return 1 - - # We need to find merges *before* current. Use ancestry instead. try: out = run_git([ 'log', '--first-parent', '--merges', '--format=%H|%h|%s', @@ -1870,31 +1850,34 @@ def do_rewind(args, dbs): ]) except Exception: # pylint: disable=broad-except tout.error(f'Could not read merge history for {current[:12]}') - return 1 + return None if not out: tout.error('No merges found in history') - return 1 + return None - # Parse merges - first line is current (or nearest merge), last is target merges = [] for line in out.strip().split('\n'): if not line: continue parts = line.split('|', 2) - merges.append((parts[0], parts[1], parts[2] if len(parts) > 2 else '')) + merges.append((parts[0], parts[1], + parts[2] if len(parts) > 2 else '')) if len(merges) < 2: tout.error(f'Not enough merges to rewind by {count}') - return 1 + return None - # The target is count merges back from the first entry target_idx = min(count, len(merges) - 1) - target_hash = merges[target_idx][0] - target_chash = merges[target_idx][1] - target_subject = merges[target_idx][2] + return merges, target_idx + - # Get all commits in the range target..current +def _rewind_get_range_commits(dbs, target_hash, current): + """Get commits in range and filter to those in database. 
+ + Returns: + tuple: (range_hashes_str, db_commits_list) or None on error + """ try: range_hashes = run_git([ 'rev-list', f'{target_hash}..{current}' @@ -1902,52 +1885,79 @@ def do_rewind(args, dbs): except Exception: # pylint: disable=broad-except tout.error(f'Could not list commits in range ' f'{target_hash[:12]}..{current[:12]}') - return 1 + return None - # Count commits that exist in the database db_commits = [] if range_hashes: for chash in range_hashes.strip().split('\n'): if chash and dbs.commit_get(chash): db_commits.append(chash) - # Find cherry-pick branches that match commits in the range. - # List all ci/cherry-* remote branches, then check if the hash in - # the branch name matches any commit in the rewound range. + return range_hashes, db_commits + + +def _rewind_find_branches(range_hashes, remote): + """Find cherry-pick branches matching commits in the range. + + Returns: + list: Branch names (without remote prefix) that match + """ + if not range_hashes: + return [] + + hash_set = set(range_hashes.strip().split('\n')) + try: + branch_out = run_git( + ['branch', '-r', '--list', f'{remote}/cherry-*']) + except Exception: # pylint: disable=broad-except + branch_out = '' + mr_branches = [] - if range_hashes: - hash_set = set(range_hashes.strip().split('\n')) - try: - branch_out = run_git( - ['branch', '-r', '--list', f'{args.remote}/cherry-*']) - except Exception: # pylint: disable=broad-except - branch_out = '' - remote_prefix = f'{args.remote}/' - for line in branch_out.strip().split('\n'): - branch = line.strip() - if not branch: - continue - # Branch is like 'ci/cherry-abc1234'; extract the hash part - short = branch.removeprefix(f'{remote_prefix}cherry-') - # Check if any commit in the range starts with this hash - for chash in hash_set: - if chash.startswith(short): - mr_branches.append( - branch.removeprefix(remote_prefix)) - break - - # Look up MR details from GitLab for matching branches + remote_prefix = f'{remote}/' + for line in 
branch_out.strip().split('\n'): + branch = line.strip() + if not branch: + continue + # Branch is like 'ci/cherry-abc1234'; extract the hash part + short = branch.removeprefix(f'{remote_prefix}cherry-') + # Check if any commit in the range starts with this hash + for chash in hash_set: + if chash.startswith(short): + mr_branches.append( + branch.removeprefix(remote_prefix)) + break + + return mr_branches + + +def _rewind_find_mrs(mr_branches, remote): + """Look up MR details for matching branches. + + Returns: + list: PickmanMr objects whose source_branch matches + """ + if not mr_branches: + return [] + matched_mrs = [] - if mr_branches: - mrs = gitlab_api.get_open_pickman_mrs(args.remote) - if mrs: - branch_set = set(mr_branches) - for merge_req in mrs: - if merge_req.source_branch in branch_set: - matched_mrs.append(merge_req) - - # Show what would happen (or what is happening) + mrs = gitlab_api.get_open_pickman_mrs(remote) + if mrs: + branch_set = set(mr_branches) + for merge_req in mrs: + if merge_req.source_branch in branch_set: + matched_mrs.append(merge_req) + + return matched_mrs + + +def _rewind_show_summary(source, current, merges, target_idx, + db_commits, matched_mrs, mr_branches, + force): + """Display rewind summary.""" current_short = current[:12] + target_chash = merges[target_idx][1] + target_subject = merges[target_idx][2] + prefix = '' if force else '[dry run] ' tout.info(f"{prefix}Rewind '{source}': " f'{current_short} -> {target_chash}') @@ -1967,15 +1977,57 @@ def do_rewind(args, dbs): for branch in mr_branches: tout.info(f' {branch}') + +def do_rewind(args, dbs): + """Rewind the source position back by N merges + + By default performs a dry run, showing what would happen. Use --force + to actually execute the rewind. + + Walks back N merges on the first-parent chain from the current source + position, deletes the commits in that range from the database, and + resets the source to the earlier position. 
+ + Args: + args (Namespace): Parsed arguments with 'source', 'count', 'force' + dbs (Database): Database instance + + Returns: + int: 0 on success, 1 on failure + """ + source = args.source + count = args.count + force = args.force + + current = dbs.source_get(source) + if not current: + tout.error(f"Source '{source}' not found in database") + return 1 + + result = _rewind_fetch_merges(current, count) + if not result: + return 1 + merges, target_idx = result + target_hash = merges[target_idx][0] + + result = _rewind_get_range_commits(dbs, target_hash, current) + if not result: + return 1 + range_hashes, db_commits = result + + mr_branches = _rewind_find_branches(range_hashes, args.remote) + matched_mrs = _rewind_find_mrs(mr_branches, args.remote) + + _rewind_show_summary(source, current, merges, target_idx, + db_commits, matched_mrs, mr_branches, force) + if not force: tout.info('Use --force to execute this rewind') return 0 - # Delete commits from database for chash in db_commits: dbs.commit_delete(chash) - # Update source position dbs.source_set(source, target_hash) dbs.commit() -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> do_next_merges() mixes fetching, display-building and output in a single function. Extract three private helpers that each handle one phase: - _next_fetch_merges(): fetch and parse merge commits from git log - _next_build_display(): expand mega-merges into sub-merge display entries - _next_show_merges(): print the formatted listing This reduces do_next_merges() to ~20 lines of sequential calls. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/control.py | 72 +++++++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 868e03df8c9..d3cb8a8fff4 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -1081,27 +1081,12 @@ def do_next_set(args, dbs): return 0 -def do_next_merges(args, dbs): - """Show the next N merges to be applied from a source - - Args: - args (Namespace): Parsed arguments with 'source' and 'count' attributes - dbs (Database): Database instance +def _next_fetch_merges(last_commit, source, count): + """Fetch the next merge commits from a source. 
Returns: - int: 0 on success, 1 if source not found + list: (hash, short_hash, subject) tuples, up to count entries """ - source = args.source - count = args.count - - # Get the last cherry-picked commit from database - last_commit = dbs.source_get(source) - - if not last_commit: - tout.error(f"Source '{source}' not found in database") - return 1 - - # Find merge commits on the first-parent chain out = run_git([ 'log', '--reverse', '--first-parent', '--merges', '--format=%H|%h|%s', @@ -1109,8 +1094,7 @@ def do_next_merges(args, dbs): ]) if not out: - tout.info('No merges remaining') - return 0 + return [] merges = [] for line in out.split('\n'): @@ -1124,9 +1108,18 @@ def do_next_merges(args, dbs): if len(merges) >= count: break - # Build display list, expanding mega-merges into sub-merges - # Each entry is (chash, subject, is_mega, sub_list) where sub_list - # is a list of (chash, subject) for mega-merge sub-merges + return merges + + +def _next_build_display(merges): + """Build display list, expanding mega-merges into sub-merges. + + Each entry is (chash, subject, is_mega, sub_list) where sub_list + is a list of (chash, subject) for mega-merge sub-merges. + + Returns: + tuple: (display_list, total_sub_count) + """ display = [] total_sub = 0 for commit_hash, chash, subject in merges: @@ -1149,6 +1142,11 @@ def do_next_merges(args, dbs): else: display.append((chash, subject, False, None)) + return display, total_sub + + +def _next_show_merges(source, merges, display, total_sub): + """Display the next-merges listing.""" n_items = total_sub + len(merges) - len( [d for d in display if d[2]]) tout.info(f'Next merges from {source} ' @@ -1165,6 +1163,34 @@ def do_next_merges(args, dbs): tout.info(f' {idx}. 
{chash} {subject}') idx += 1 + +def do_next_merges(args, dbs): + """Show the next N merges to be applied from a source + + Args: + args (Namespace): Parsed arguments with 'source' and 'count' attributes + dbs (Database): Database instance + + Returns: + int: 0 on success, 1 if source not found + """ + source = args.source + count = args.count + + last_commit = dbs.source_get(source) + if not last_commit: + tout.error(f"Source '{source}' not found in database") + return 1 + + merges = _next_fetch_merges(last_commit, source, count) + if not merges: + tout.info('No merges remaining') + return 0 + + display, total_sub = _next_build_display(merges) + + _next_show_merges(source, merges, display, total_sub) + return 0 -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> The three phases of decompose_mega_merge() all follow the same pattern: run git log, parse the output, filter out commits already in the database. Extract two helpers to reduce the duplication: - _mega_preadd(): pre-add the mega-merge commit as 'skipped' in the DB - _mega_get_batch(): common git-log/parse/filter pattern shared by all three phases This makes each phase a single call to _mega_get_batch() instead of repeated inline blocks. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/control.py | 96 ++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 43 deletions(-) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index d3cb8a8fff4..19cd2813b64 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -964,6 +964,49 @@ def detect_sub_merges(merge_hash): return [line for line in out.split('\n') if line] +def _mega_preadd(dbs, merge_hash): + """Pre-add a mega-merge commit to the database as 'skipped'. + + This prevents the mega-merge from appearing as an orphan commit. + Does nothing if the commit already exists in the database. + """ + if dbs.commit_get(merge_hash): + return + + source_id = None + sources = dbs.source_get_all() + if sources: + source_id = dbs.source_get_id(sources[0][0]) + if source_id: + info = run_git(['log', '-1', '--format=%s|%an', merge_hash]) + parts = info.split('|', 1) + subject = parts[0] + author = parts[1] if len(parts) > 1 else '' + dbs.commit_add(merge_hash, source_id, subject, author, + status='skipped') + dbs.commit() + + +def _mega_get_batch(dbs, exclude_ref, include_ref): + """Fetch a batch of unprocessed commits between two refs. + + Runs git log for the range ^exclude_ref include_ref, parses the + output and filters out commits already in the database. 
+ + Returns: + list: CommitInfo tuples for unprocessed commits, may be empty + """ + log_output = run_git([ + 'log', '--reverse', '--format=%H|%h|%an|%s|%P', + f'^{exclude_ref}', include_ref + ]) + if not log_output: + return [] + + all_commits = parse_log_output(log_output, has_parents=True) + return [c for c in all_commits if not dbs.commit_get(c.hash)] + + def decompose_mega_merge(dbs, prev_commit, merge_hash, sub_merges): """Return the next unprocessed batch from a mega-merge @@ -990,60 +1033,27 @@ def decompose_mega_merge(dbs, prev_commit, merge_hash, sub_merges): first_parent = parents[0] second_parent = parents[1] - # Pre-add the mega-merge commit itself as skipped - if not dbs.commit_get(merge_hash): - source_id = None - sources = dbs.source_get_all() - if sources: - source_id = dbs.source_get_id(sources[0][0]) - if source_id: - info = run_git(['log', '-1', '--format=%s|%an', merge_hash]) - parts = info.split('|', 1) - subject = parts[0] - author = parts[1] if len(parts) > 1 else '' - dbs.commit_add(merge_hash, source_id, subject, author, - status='skipped') - dbs.commit() + _mega_preadd(dbs, merge_hash) # Phase 1: mainline commits before the merge - log_output = run_git([ - 'log', '--reverse', '--format=%H|%h|%an|%s|%P', - f'{prev_commit}..{first_parent}' - ]) - if log_output: - all_commits = parse_log_output(log_output, has_parents=True) - commits = [c for c in all_commits if not dbs.commit_get(c.hash)] - if commits: - return commits, first_parent + commits = _mega_get_batch(dbs, prev_commit, first_parent) + if commits: + return commits, first_parent # Phase 2: sub-merge batches prev_sub = first_parent for sub_hash in sub_merges: - # Get commits for this sub-merge - log_output = run_git([ - 'log', '--reverse', '--format=%H|%h|%an|%s|%P', - f'^{prev_sub}', sub_hash - ]) - if log_output: - all_commits = parse_log_output(log_output, has_parents=True) - commits = [c for c in all_commits if not dbs.commit_get(c.hash)] - if commits: - return commits, None + 
commits = _mega_get_batch(dbs, prev_sub, sub_hash) + if commits: + return commits, None prev_sub = sub_hash # Phase 3: remainder after the last sub-merge last_sub = sub_merges[-1] if sub_merges else first_parent - log_output = run_git([ - 'log', '--reverse', '--format=%H|%h|%an|%s|%P', - f'^{last_sub}', second_parent - ]) - if log_output: - all_commits = parse_log_output(log_output, has_parents=True) - commits = [c for c in all_commits if not dbs.commit_get(c.hash)] - if commits: - return commits, None + commits = _mega_get_batch(dbs, last_sub, second_parent) + if commits: + return commits, None - # All done return [], None -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> Extract two helpers from handle_already_applied() to separate its functionality: - _applied_advance_source(): choose and set the new source position from the three possible cases (advance_to, signal_commit, last hash) - _applied_create_skip_mr(): push a skip branch and create the MR This reduces handle_already_applied() to a short sequence of mark, advance, and optionally push. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/control.py | 101 ++++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 43 deletions(-) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 19cd2813b64..39ce57d0895 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -1539,6 +1539,60 @@ def prepare_apply(dbs, source, branch): # pylint: disable=too-many-arguments +def _applied_advance_source(dbs, source, commits, advance_to, + signal_commit): + """Advance the source position after skipping already-applied commits. + + Chooses the new position from advance_to, signal_commit, or the + last commit hash, in that priority order. + """ + if advance_to is not None: + new_hash = advance_to + elif signal_commit: + new_hash = signal_commit + else: + new_hash = commits[-1].hash + + dbs.source_set(source, new_hash) + dbs.commit() + tout.info(f"Updated source '{source}' to {new_hash[:12]}") + + +def _applied_create_skip_mr(args, source, commits, branch_name, conv_log): + """Push a skip branch and create an MR recording the skip. 
+ + Returns: + int: 0 on success, 1 on failure + """ + remote = args.remote + target = args.target + + try: + run_git(['checkout', '-b', branch_name, f'{remote}/{target}']) + except Exception: # pylint: disable=broad-except + # Branch may already exist from failed attempt + try: + run_git(['checkout', branch_name]) + except Exception: # pylint: disable=broad-except + tout.error(f'Could not create/checkout branch {branch_name}') + return 1 + + title = f'{SKIPPED_TAG} [pickman] {commits[-1].subject}' + summary = format_history(source, commits, branch_name) + description = (f'{summary}\n\n' + f'**Status:** Commits already applied to {target} ' + f'with different hashes.\n\n' + f'### Conversation log\n{conv_log}') + + mr_url = gitlab_api.push_and_create_mr( + remote, branch_name, target, title, description + ) + if not mr_url: + return 1 + + return 0 + + def handle_already_applied(dbs, source, commits, branch_name, conv_log, args, signal_commit, advance_to=None): """Handle the case where commits are already applied to the target branch @@ -1563,55 +1617,16 @@ def handle_already_applied(dbs, source, commits, branch_name, conv_log, args, """ tout.info('Commits already applied to target branch - creating skip MR') - # Mark commits as 'skipped' in database for commit in commits: dbs.commit_set_status(commit.hash, 'skipped') dbs.commit() - # Update source position - if advance_to is not None: - dbs.source_set(source, advance_to) - dbs.commit() - tout.info(f"Updated source '{source}' to {advance_to[:12]}") - elif signal_commit: - dbs.source_set(source, signal_commit) - dbs.commit() - tout.info(f"Updated source '{source}' to {signal_commit[:12]}") - else: - last_hash = commits[-1].hash - dbs.source_set(source, last_hash) - dbs.commit() - tout.info(f"Updated source '{source}' to {last_hash[:12]}") + _applied_advance_source(dbs, source, commits, advance_to, + signal_commit) - # Push and create MR with [skip] prefix if requested if args.push: - remote = args.remote - target = 
args.target - - # Create a skip branch from ci/master (no changes) - try: - run_git(['checkout', '-b', branch_name, f'{remote}/{target}']) - except Exception: # pylint: disable=broad-except - # Branch may already exist from failed attempt - try: - run_git(['checkout', branch_name]) - except Exception: # pylint: disable=broad-except - tout.error(f'Could not create/checkout branch {branch_name}') - return 1 - - # Use merge commit subject as title with [skip] prefix - title = f'{SKIPPED_TAG} [pickman] {commits[-1].subject}' - summary = format_history(source, commits, branch_name) - description = (f'{summary}\n\n' - f'**Status:** Commits already applied to {target} ' - f'with different hashes.\n\n' - f'### Conversation log\n{conv_log}') - - mr_url = gitlab_api.push_and_create_mr( - remote, branch_name, target, title, description - ) - if not mr_url: - return 1 + return _applied_create_skip_mr(args, source, commits, + branch_name, conv_log) return 0 -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> Add test coverage for the remaining paths in prepare_apply(): - Existing branch deletion when the target branch already exists - The merge_found=True message and advance_to passthrough - The no-merge-found message for trailing commits Also initialise tout in setUp() so the output-checking tests can capture tout.info() messages. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/ftest.py | 99 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index f07e4ecb0db..a2c19a1159e 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -2677,6 +2677,105 @@ class TestPrepareApply(unittest.TestCase): self.assertEqual(info.branch_name, 'my-branch') dbs.close() + def test_prepare_apply_deletes_existing_branch(self): + """Test prepare_apply deletes a branch that already exists.""" + git_cmds = [] + + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'abc123') + dbs.commit() + + log_output = 'aaa111|aaa111a|Author 1|First commit|abc123\n' + + def mock_git(pipe_list): + cmd = pipe_list[0] if pipe_list else [] + git_cmds.append(cmd) + if 'log' in cmd: + return command.CommandResult(stdout=log_output) + if 'rev-parse' in cmd: + return command.CommandResult(stdout='master') + if 'branch' in cmd and '--list' in cmd: + return command.CommandResult(stdout='cherry-aaa111a\n') + return command.CommandResult(stdout='') + + command.TEST_RESULT = mock_git + + info, ret = control.prepare_apply(dbs, 'us/next', None) + + self.assertIsNotNone(info) + self.assertEqual(ret, 0) + self.assertTrue( + any('branch' in c and '-D' in c for c in git_cmds)) + dbs.close() + + def test_prepare_apply_merge_found(self): + """Test prepare_apply sets merge_found and advance_to.""" + with terminal.capture(): + dbs = 
database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'abc123') + dbs.commit() + + merge_hash = 'ccc333ccc333ccc333' + + merge_info = control.NextCommitsInfo( + commits=[ + control.CommitInfo('aaa111', 'aaa111a', 'First commit', + 'Author 1'), + control.CommitInfo('bbb222', 'bbb222b', 'Second commit', + 'Author 2'), + ], + merge_found=True, + advance_to=merge_hash, + ) + + def mock_git(pipe_list): + cmd = pipe_list[0] if pipe_list else [] + if 'rev-parse' in cmd: + return command.CommandResult(stdout='master') + return command.CommandResult(stdout='') + + with mock.patch.object(control, 'get_next_commits', + return_value=(merge_info, None)): + command.TEST_RESULT = mock_git + info, ret = control.prepare_apply(dbs, 'us/next', None) + + self.assertIsNotNone(info) + self.assertEqual(ret, 0) + self.assertTrue(info.merge_found) + self.assertEqual(info.advance_to, merge_hash) + self.assertEqual(len(info.commits), 2) + dbs.close() + + def test_prepare_apply_no_merge(self): + """Test prepare_apply reports no merge found.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'abc123') + dbs.commit() + + log_output = 'aaa111|aaa111a|Author 1|First commit|abc123\n' + + def mock_git(pipe_list): + cmd = pipe_list[0] if pipe_list else [] + if 'log' in cmd: + return command.CommandResult(stdout=log_output) + if 'rev-parse' in cmd: + return command.CommandResult(stdout='master') + return command.CommandResult(stdout='') + + command.TEST_RESULT = mock_git + + info, ret = control.prepare_apply(dbs, 'us/next', None) + + self.assertIsNotNone(info) + self.assertEqual(ret, 0) + self.assertFalse(info.merge_found) + dbs.close() + class TestExecuteApply(unittest.TestCase): """Tests for execute_apply function.""" -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> Create a new _prepare_get_commits() helper to handle finding the next set of commits to process. This helps to reduce the size of prepare_apply() Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/control.py | 43 ++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 39ce57d0895..3e69cba7a9a 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -1473,40 +1473,57 @@ def push_mr(args, branch_name, title, description): return bool(mr_url) -def prepare_apply(dbs, source, branch): - """Prepare for applying commits from a source branch +def _prepare_get_commits(dbs, source): + """Get the next commits to apply, handling skips. - Gets the next commits, sets up the branch name, and prints info about - what will be applied. + Fetches the next batch of commits from the source. Args: dbs (Database): Database instance source (str): Source branch name - branch (str): Branch name to use, or None to auto-generate Returns: - tuple: (ApplyInfo, return_code) where ApplyInfo is set if there are - commits to apply, or None with return_code indicating the result - (0 for no commits, 1 for error) + tuple: (NextCommitsInfo, return_code) where return_code is None + on success, or an int (0 or 1) if there is nothing to do """ info, err = get_next_commits(dbs, source) - if err: tout.error(err) return None, 1 if not info.commits: - # If advance_to is set, advance source past fully-processed merges if info.advance_to: dbs.source_set(source, info.advance_to) dbs.commit() tout.info(f"Advanced source '{source}' to " f'{info.advance_to[:12]}') - # Retry with updated position - return prepare_apply(dbs, source, branch) - tout.info('No new commits to cherry-pick') + else: + tout.info('No new commits to cherry-pick') return None, 0 + return info, None + + +def prepare_apply(dbs, source, branch): + """Prepare 
for applying commits from a source branch + + Gets the next commits, sets up the branch name, and prints info about + what will be applied. + + Args: + dbs (Database): Database instance + source (str): Source branch name + branch (str): Branch name to use, or None to auto-generate + + Returns: + tuple: (ApplyInfo, return_code) where ApplyInfo is set if there are + commits to apply, or None with return_code indicating the result + (0 for no commits, 1 for error) + """ + info, ret = _prepare_get_commits(dbs, source) + if ret is not None: + return None, ret + commits = info.commits # Save current branch to return to later -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> When pickman cherry-picks a series from us/next, it may encounter commits that depend on a dts/upstream subtree update that is not yet in ci/master. Subtree merges cannot be cherry-picked; they must be applied with ./tools/update-subtree.sh pull dts <tag>. Add subtree-merge detection to find_unprocessed_commits(). When a merge subject matches "Subtree merge tag '<tag>' of ... into <path>", pickman checks out the target branch, runs update-subtree.sh, marks both the squash and merge commits as applied, advances the source position, and retries to get the next batch. This handles dts/upstream, lib/mbedtls/external/mbedtls and lib/lwip/lwip subtrees. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/README.rst | 33 ++ tools/pickman/control.py | 207 +++++++++++-- tools/pickman/ftest.py | 640 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 844 insertions(+), 36 deletions(-) diff --git a/tools/pickman/README.rst b/tools/pickman/README.rst index 5c6121610be..70309032838 100644 --- a/tools/pickman/README.rst +++ b/tools/pickman/README.rst @@ -111,6 +111,39 @@ If there are no merge commits between the last processed commit and the branch tip, pickman includes all remaining commits in a single set. This is noted in the output as "no merge found". +Subtree Merge Handling +---------------------- + +The source branch may contain subtree merges that update vendored trees such as +``dts/upstream``, ``lib/mbedtls/external/mbedtls`` or ``lib/lwip/lwip``. These +appear as a pair of commits on the first-parent chain: + +1. ``Squashed 'dts/upstream/' changes from <old>..<new>`` (the actual file + changes) +2. ``Subtree merge tag '<tag>' of <repo> into dts/upstream`` (the merge commit + joining histories) + +These commits cannot be cherry-picked. 
Pickman detects them automatically by +matching the merge subject against the pattern +``Subtree merge tag '<tag>' of ... into <path>``. When a subtree merge is found, +pickman: + +1. Checks out the target branch (e.g. ``ci/master``) +2. Runs ``./tools/update-subtree.sh pull <name> <tag>`` to apply the update +3. Pushes the target branch (if ``--push`` is active) +4. Records both the squash and merge commits as 'applied' in the database +5. Advances the source position past the merge and continues with the next batch + +This works without manual intervention. The currently supported subtrees are: + +================================= =========== +Path Name +================================= =========== +``dts/upstream`` ``dts`` +``lib/mbedtls/external/mbedtls`` ``mbedtls`` +``lib/lwip/lwip`` ``lwip`` +================================= =========== + Skipping MRs ------------ diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 3e69cba7a9a..48f7ced1617 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -46,6 +46,17 @@ RE_GIT_STAT_FILE = re.compile(r'^([^|]+)\s*\|') # Extract hash from line like "(cherry picked from commit abc123def)" RE_CHERRY_PICK = re.compile(r'cherry picked from commit ([a-f0-9]+)') +# Detect subtree merge commits on the first-parent chain +RE_SUBTREE_MERGE = re.compile( + r"Subtree merge tag '([^']+)' of .* into (.*)") + +# Map from subtree path to update-subtree.sh name +SUBTREE_NAMES = { + 'dts/upstream': 'dts', + 'lib/mbedtls/external/mbedtls': 'mbedtls', + 'lib/lwip/lwip': 'lwip', +} + # Named tuple for commit info Commit = namedtuple('Commit', ['hash', 'chash', 'subject', 'date']) @@ -92,8 +103,11 @@ AgentCommit = namedtuple('AgentCommit', # commits: list of CommitInfo to cherry-pick # merge_found: True if these commits came from a merge on the source branch # advance_to: hash to advance the source position to, or None to stay put +# subtree_update: (name, tag) tuple if a subtree update is needed, 
else None NextCommitsInfo = namedtuple('NextCommitsInfo', - ['commits', 'merge_found', 'advance_to']) + ['commits', 'merge_found', 'advance_to', + 'subtree_update'], + defaults=[None]) # Named tuple for prepare_apply() result # @@ -747,6 +761,22 @@ def do_check_gitlab(args, dbs): # pylint: disable=unused-argument return 0 +def _check_subtree_merge(merge_hash): + """Check if a merge commit is a subtree merge. + + Returns: + tuple: (name, tag) where name is the subtree name (or None for + unknown paths), or None if not a subtree merge + """ + subject = run_git(['log', '-1', '--format=%s', merge_hash]) + match = RE_SUBTREE_MERGE.match(subject) + if not match: + return None + tag = match.group(1) + path = match.group(2) + return SUBTREE_NAMES.get(path), tag + + def find_unprocessed_commits(dbs, last_commit, source, merge_hashes): """Find the first merge with unprocessed commits @@ -766,6 +796,18 @@ def find_unprocessed_commits(dbs, last_commit, source, merge_hashes): prev_commit = last_commit skipped_merges = False for merge_hash in merge_hashes: + # Check for subtree merge (e.g. dts/upstream update) + result = _check_subtree_merge(merge_hash) + if result is not None: + name, tag = result + if name: + return NextCommitsInfo([], True, merge_hash, + (name, tag)) + # Unknown subtree path - skip past it + prev_commit = merge_hash + skipped_merges = True + continue + # Check for mega-merge (contains sub-merges) sub_merges = detect_sub_merges(merge_hash) if sub_merges: @@ -1473,54 +1515,175 @@ def push_mr(args, branch_name, title, description): return bool(mr_url) -def _prepare_get_commits(dbs, source): - """Get the next commits to apply, handling skips. +def _subtree_run_update(name, tag): + """Run update-subtree.sh to pull a subtree update. + + On failure, aborts any in-progress merge to clean up the working + tree. 
+ + Returns: + int: 0 on success, 1 on failure + """ + try: + result = command.run( + './tools/update-subtree.sh', 'pull', name, tag, + capture=True, raise_on_error=False) + if result.stdout: + tout.info(result.stdout) + if result.return_code: + tout.error(f'Subtree update failed (exit code ' + f'{result.return_code})') + if result.stderr: + tout.error(result.stderr) + try: + run_git(['merge', '--abort']) + except Exception: # pylint: disable=broad-except + pass + return 1 + except Exception as exc: # pylint: disable=broad-except + tout.error(f'Subtree update failed: {exc}') + return 1 + + return 0 + - Fetches the next batch of commits from the source. +def _subtree_record(dbs, source, squash_hash, merge_hash): + """Mark subtree commits as applied and advance the source position.""" + source_id = dbs.source_get_id(source) + for commit_hash in [squash_hash, merge_hash]: + if not dbs.commit_get(commit_hash): + info = run_git(['log', '-1', '--format=%s|%an', commit_hash]) + parts = info.split('|', 1) + subj = parts[0] + auth = parts[1] if len(parts) > 1 else '' + dbs.commit_add(commit_hash, source_id, subj, auth, + status='applied') + dbs.commit() + + dbs.source_set(source, merge_hash) + dbs.commit() + tout.info(f"Advanced source '{source}' past subtree merge " + f'{merge_hash[:12]}') + + +def apply_subtree_update(dbs, source, name, tag, merge_hash, args): # pylint: disable=too-many-arguments + """Apply a subtree update on the target branch + + Runs tools/update-subtree.sh to pull the subtree update, then + optionally pushes the result to the remote target branch. Args: dbs (Database): Database instance source (str): Source branch name + name (str): Subtree name ('dts', 'mbedtls', 'lwip') + tag (str): Tag to pull (e.g. 
'v6.15-dts') + merge_hash (str): Hash of the subtree merge commit to advance past + args (Namespace): Parsed arguments with 'push', 'remote', 'target' + + Returns: + int: 0 on success, 1 on failure + """ + target = args.target + + tout.info(f'Applying subtree update: {name} -> {tag}') + + # Get the squash commit (second parent of the merge) + parents = run_git(['rev-parse', f'{merge_hash}^@']).split() + if len(parents) < 2: + tout.error(f'Subtree merge {merge_hash[:12]} has no second parent') + return 1 + squash_hash = parents[1] + + try: + run_git(['checkout', target]) + except Exception: # pylint: disable=broad-except + tout.error(f'Could not checkout {target}') + return 1 + + ret = _subtree_run_update(name, tag) + if ret: + return ret + + if args.push: + try: + gitlab_api.push_branch(args.remote, target, skip_ci=True) + tout.info(f'Pushed {target} to {args.remote}') + except Exception as exc: # pylint: disable=broad-except + tout.error(f'Failed to push {target}: {exc}') + return 1 + + _subtree_record(dbs, source, squash_hash, merge_hash) + + return 0 + + +def _prepare_get_commits(dbs, source, args): + """Get the next commits to apply, handling subtrees and skips. + + Fetch the next batch of commits from the source. If a subtree + update is encountered, apply it and retry. If all commits in a + merge are already processed, advance the source and retry. 
+ + Args: + dbs (Database): Database instance + source (str): Source branch name + args (Namespace): Parsed arguments (needed for subtree updates) Returns: tuple: (NextCommitsInfo, return_code) where return_code is None on success, or an int (0 or 1) if there is nothing to do """ - info, err = get_next_commits(dbs, source) - if err: - tout.error(err) - return None, 1 + while True: + info, err = get_next_commits(dbs, source) + if err: + tout.error(err) + return None, 1 + + if info.subtree_update: + name, tag = info.subtree_update + tout.info(f'Subtree update needed: {name} -> {tag}') + if not args: + tout.error('Cannot apply subtree update without args') + return None, 1 + ret = apply_subtree_update(dbs, source, name, tag, + info.advance_to, args) + if ret: + return None, ret + continue - if not info.commits: - if info.advance_to: - dbs.source_set(source, info.advance_to) - dbs.commit() - tout.info(f"Advanced source '{source}' to " - f'{info.advance_to[:12]}') - else: + if not info.commits: + if info.advance_to: + dbs.source_set(source, info.advance_to) + dbs.commit() + tout.info(f"Advanced source '{source}' to " + f'{info.advance_to[:12]}') + continue tout.info('No new commits to cherry-pick') - return None, 0 + return None, 0 - return info, None + return info, None -def prepare_apply(dbs, source, branch): +def prepare_apply(dbs, source, branch, args=None): """Prepare for applying commits from a source branch - Gets the next commits, sets up the branch name, and prints info about - what will be applied. + Get the next commits, set up the branch name and prints info about + what will be applied. When a subtree update is encountered, apply it + automatically and retry. 
Args: dbs (Database): Database instance source (str): Source branch name branch (str): Branch name to use, or None to auto-generate + args (Namespace): Parsed arguments with 'push', 'remote', 'target' + (needed for subtree updates) Returns: tuple: (ApplyInfo, return_code) where ApplyInfo is set if there are commits to apply, or None with return_code indicating the result (0 for no commits, 1 for error) """ - info, ret = _prepare_get_commits(dbs, source) + info, ret = _prepare_get_commits(dbs, source, args) if ret is not None: return None, ret @@ -1738,7 +1901,7 @@ def do_apply(args, dbs): int: 0 on success, 1 on failure """ source = args.source - info, ret = prepare_apply(dbs, source, args.branch) + info, ret = prepare_apply(dbs, source, args.branch, args) if not info: return ret diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index a2c19a1159e..de6bce40614 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -2706,6 +2706,7 @@ class TestPrepareApply(unittest.TestCase): self.assertIsNotNone(info) self.assertEqual(ret, 0) + # Check that branch -D was called self.assertTrue( any('branch' in c and '-D' in c for c in git_cmds)) dbs.close() @@ -3463,24 +3464,28 @@ class TestGetNextCommitsMegaMerge(unittest.TestCase): stdout='mega|mega1|A|Merge branch next|' 'base second_parent\n') if call_count[0] == 2: + # Subtree check: log -1 --format=%s + return command.CommandResult( + stdout='Merge branch next') + if call_count[0] == 3: # detect_sub_merges: rev-parse ^@ return command.CommandResult( stdout='base\nsecond_parent\n') - if call_count[0] == 3: + if call_count[0] == 4: # detect_sub_merges: log --merges (found sub-merges) return command.CommandResult(stdout='sub1\n') - if call_count[0] == 4: + if call_count[0] == 5: # decompose: rev-parse ^@ for mega-merge return command.CommandResult( stdout='base\nsecond_parent\n') - if call_count[0] == 5: + if call_count[0] == 6: # decompose: log -1 for mega-merge info return command.CommandResult( 
stdout='Mega merge|Author\n') - if call_count[0] == 6: + if call_count[0] == 7: # decompose: mainline commits (empty) return command.CommandResult(stdout='') - if call_count[0] == 7: + if call_count[0] == 8: # decompose: sub-merge 1 commits return command.CommandResult( stdout='aaa|aaa1|A|Sub commit|base\n') @@ -3522,31 +3527,35 @@ class TestGetNextCommitsMegaMerge(unittest.TestCase): stdout='mega|mega1|A|Merge branch next|' 'base second_parent\n') if call_count[0] == 2: + # Subtree check: log -1 --format=%s + return command.CommandResult( + stdout='Merge branch next') + if call_count[0] == 3: # detect_sub_merges: rev-parse ^@ return command.CommandResult( stdout='base\nsecond_parent\n') - if call_count[0] == 3: + if call_count[0] == 4: # detect_sub_merges: log --merges return command.CommandResult(stdout='sub1\n') - if call_count[0] == 4: + if call_count[0] == 5: # decompose: rev-parse ^@ return command.CommandResult( stdout='base\nsecond_parent\n') - if call_count[0] == 5: + if call_count[0] == 6: # decompose: log -1 for mega-merge info return command.CommandResult( stdout='Mega merge|Author\n') - if call_count[0] == 6: + if call_count[0] == 7: # decompose: mainline (empty) return command.CommandResult(stdout='') - if call_count[0] == 7: + if call_count[0] == 8: # decompose: sub-merge 1 (in DB) return command.CommandResult( stdout='aaa|aaa1|A|Sub commit|base\n') - if call_count[0] == 8: + if call_count[0] == 9: # decompose: remainder (empty) return command.CommandResult(stdout='') - if call_count[0] == 9: + if call_count[0] == 10: # Remaining commits after mega-merge (empty) return command.CommandResult(stdout='') return command.CommandResult(stdout='') @@ -3580,13 +3589,17 @@ class TestGetNextCommitsMegaMerge(unittest.TestCase): stdout='merge1|m1|A|Merge branch feat|' 'base side1\n') if call_count[0] == 2: + # Subtree check: log -1 --format=%s + return command.CommandResult( + stdout='Merge branch feat') + if call_count[0] == 3: # detect_sub_merges: rev-parse ^@ 
return command.CommandResult( stdout='base\nside1\n') - if call_count[0] == 3: + if call_count[0] == 4: # detect_sub_merges: log --merges (no sub-merges) return command.CommandResult(stdout='') - if call_count[0] == 4: + if call_count[0] == 5: # Commits for this merge return command.CommandResult( stdout='aaa|aaa1|A|Commit 1|base\n' @@ -3606,6 +3619,605 @@ class TestGetNextCommitsMegaMerge(unittest.TestCase): dbs.close() +class TestSubtreeMergeDetection(unittest.TestCase): + """Tests for subtree merge detection in find_unprocessed_commits.""" + + def setUp(self): + """Set up test fixtures.""" + fd, self.db_path = tempfile.mkstemp(suffix='.db') + os.close(fd) + os.unlink(self.db_path) + database.Database.instances.clear() + + def tearDown(self): + """Clean up test fixtures.""" + if os.path.exists(self.db_path): + os.unlink(self.db_path) + database.Database.instances.clear() + command.TEST_RESULT = None + + def test_detects_dts_subtree_merge(self): + """Test find_unprocessed_commits detects dts/upstream subtree merge.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + command.TEST_RESULT = command.CommandResult( + stdout="Subtree merge tag 'v6.15-dts' of " + "https://example.com/dts.git into dts/upstream") + + info = control.find_unprocessed_commits( + dbs, 'base', 'us/next', ['merge1']) + + self.assertTrue(info.merge_found) + self.assertEqual(info.commits, []) + self.assertEqual(info.advance_to, 'merge1') + self.assertEqual(info.subtree_update, ('dts', 'v6.15-dts')) + dbs.close() + + def test_detects_mbedtls_subtree_merge(self): + """Test find_unprocessed_commits detects mbedtls subtree merge.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + command.TEST_RESULT = command.CommandResult( + stdout="Subtree merge tag 'v3.6.2' of " + "https://example.com/mbedtls.git into " + 
"lib/mbedtls/external/mbedtls") + + info = control.find_unprocessed_commits( + dbs, 'base', 'us/next', ['merge1']) + + self.assertEqual(info.subtree_update, + ('mbedtls', 'v3.6.2')) + dbs.close() + + def test_detects_lwip_subtree_merge(self): + """Test find_unprocessed_commits detects lwip subtree merge.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + command.TEST_RESULT = command.CommandResult( + stdout="Subtree merge tag 'STABLE-2_2_0' of " + "https://example.com/lwip.git into lib/lwip/lwip") + + info = control.find_unprocessed_commits( + dbs, 'base', 'us/next', ['merge1']) + + self.assertEqual(info.subtree_update, + ('lwip', 'STABLE-2_2_0')) + dbs.close() + + def test_skips_unknown_subtree_path(self): + """Test find_unprocessed_commits skips unknown subtree paths.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + call_count = [0] + + def mock_git(pipe_list): # pylint: disable=unused-argument + call_count[0] += 1 + if call_count[0] == 1: + # Subject for merge1: unknown subtree + return command.CommandResult( + stdout="Subtree merge tag 'v1.0' of " + "https://x.com/x.git into lib/unknown") + if call_count[0] == 2: + # Subject for merge2: not a subtree merge + return command.CommandResult( + stdout='Normal merge commit') + if call_count[0] == 3: + # detect_sub_merges: rev-parse ^@ + return command.CommandResult( + stdout='merge1\nside1\n') + if call_count[0] == 4: + # detect_sub_merges: log --merges (no sub-merges) + return command.CommandResult(stdout='') + if call_count[0] == 5: + # Commits for merge2 + return command.CommandResult( + stdout='aaa|aaa1|A|Commit 1|merge1\n') + return command.CommandResult(stdout='') + + command.TEST_RESULT = mock_git + + info = control.find_unprocessed_commits( + dbs, 'base', 'us/next', ['merge1', 'merge2']) + + # Should have skipped merge1 and found 
commits in merge2 + self.assertIsNone(info.subtree_update) + self.assertTrue(info.merge_found) + self.assertEqual(len(info.commits), 1) + self.assertEqual(info.commits[0].chash, 'aaa1') + dbs.close() + + def test_subtree_merge_via_get_next_commits(self): + """Test get_next_commits returns subtree_update for subtree merge.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + call_count = [0] + + def mock_git(pipe_list): # pylint: disable=unused-argument + call_count[0] += 1 + if call_count[0] == 1: + # First-parent log shows one merge + return command.CommandResult( + stdout='merge1|m1|A|Subtree merge tag ' + "'v6.15-dts' of https://x.com/dts.git" + ' into dts/upstream|base second\n') + if call_count[0] == 2: + # find_unprocessed: log -1 --format=%s for merge1 + return command.CommandResult( + stdout="Subtree merge tag 'v6.15-dts' of " + "https://x.com/dts.git into " + "dts/upstream") + return command.CommandResult(stdout='') + + command.TEST_RESULT = mock_git + + info, err = control.get_next_commits(dbs, 'us/next') + + self.assertIsNone(err) + self.assertEqual(info.subtree_update, ('dts', 'v6.15-dts')) + self.assertEqual(info.advance_to, 'merge1') + self.assertEqual(info.commits, []) + dbs.close() + + def test_non_subtree_merge_has_no_subtree_update(self): + """Test normal merges have subtree_update=None.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + call_count = [0] + + def mock_git(pipe_list): # pylint: disable=unused-argument + call_count[0] += 1 + if call_count[0] == 1: + # Subject: not a subtree merge + return command.CommandResult( + stdout='Merge branch some-feature') + if call_count[0] == 2: + # detect_sub_merges: rev-parse ^@ + return command.CommandResult( + stdout='base\nside1\n') + if call_count[0] == 3: + # detect_sub_merges: log --merges (no sub-merges) + return 
command.CommandResult(stdout='') + if call_count[0] == 4: + # Commits in merge + return command.CommandResult( + stdout='aaa|aaa1|A|Commit 1|base\n') + return command.CommandResult(stdout='') + + command.TEST_RESULT = mock_git + + info = control.find_unprocessed_commits( + dbs, 'base', 'us/next', ['merge1']) + + self.assertIsNone(info.subtree_update) + self.assertTrue(info.merge_found) + self.assertEqual(len(info.commits), 1) + dbs.close() + + +class TestApplySubtreeUpdate(unittest.TestCase): + """Tests for apply_subtree_update function.""" + + def setUp(self): + """Set up test fixtures.""" + fd, self.db_path = tempfile.mkstemp(suffix='.db') + os.close(fd) + os.unlink(self.db_path) + database.Database.instances.clear() + + def tearDown(self): + """Clean up test fixtures.""" + if os.path.exists(self.db_path): + os.unlink(self.db_path) + database.Database.instances.clear() + command.TEST_RESULT = None + + def test_apply_success(self): + """Test apply_subtree_update succeeds and updates database.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=False, remote='ci', + target='master') + + def run_git_handler(git_args): + if 'rev-parse' in git_args: + # Parents of merge: first_parent squash_hash + return 'first_parent\nsquash_hash' + if 'checkout' in git_args: + return '' + if '--format=%s|%an' in git_args: + if 'squash_hash' in git_args: + return "Squashed 'dts/upstream/' changes|Author" + return "Subtree merge tag 'v6.15-dts'|Author" + return '' + + mock_result = command.CommandResult( + 'Subtree updated', '', '', 0) + with mock.patch.object(control, 'run_git', + side_effect=run_git_handler): + with mock.patch.object( + control.command, 'run', + return_value=mock_result): + ret = control.apply_subtree_update( + dbs, 'us/next', 'dts', 'v6.15-dts', + 'merge_hash', args) + + self.assertEqual(ret, 0) + + # Source should be advanced past the merge + 
self.assertEqual(dbs.source_get('us/next'), 'merge_hash') + + # Both commits should be in the database + squash = dbs.commit_get('squash_hash') + self.assertIsNotNone(squash) + merge = dbs.commit_get('merge_hash') + self.assertIsNotNone(merge) + dbs.close() + + def test_apply_with_push(self): + """Test apply_subtree_update pushes when args.push is True.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=True, remote='ci', + target='master') + + def run_git_handler(git_args): + if 'rev-parse' in git_args: + return 'first_parent\nsquash_hash' + if 'checkout' in git_args: + return '' + if '--format=%s|%an' in git_args: + return 'Commit subject|Author' + return '' + + pushed = [False] + + def mock_push(remote, branch, skip_ci=False): + pushed[0] = True + return True + + mock_result = command.CommandResult('ok', '', '', 0) + with mock.patch.object(control, 'run_git', + side_effect=run_git_handler): + with mock.patch.object( + control.command, 'run', + return_value=mock_result): + with mock.patch.object( + control.gitlab_api, 'push_branch', + side_effect=mock_push): + ret = control.apply_subtree_update( + dbs, 'us/next', 'dts', 'v6.15-dts', + 'merge_hash', args) + + self.assertEqual(ret, 0) + self.assertTrue(pushed[0]) + dbs.close() + + def test_apply_checkout_failure(self): + """Test apply_subtree_update returns 1 on checkout failure.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=False, remote='ci', + target='master') + + def run_git_handler(git_args): + if 'rev-parse' in git_args: + return 'first_parent\nsquash_hash' + if 'checkout' in git_args: + raise Exception('checkout failed') + return '' + + with mock.patch.object(control, 'run_git', + side_effect=run_git_handler): + ret = control.apply_subtree_update( + dbs, 'us/next', 
'dts', 'v6.15-dts', + 'merge_hash', args) + + self.assertEqual(ret, 1) + # Source should not be advanced + self.assertEqual(dbs.source_get('us/next'), 'base') + dbs.close() + + def test_apply_no_second_parent(self): + """Test apply_subtree_update returns 1 when merge has no 2nd parent.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=False, remote='ci', + target='master') + + # Only one parent + with mock.patch.object(control, 'run_git', + return_value='single_parent'): + ret = control.apply_subtree_update( + dbs, 'us/next', 'dts', 'v6.15-dts', + 'merge_hash', args) + + self.assertEqual(ret, 1) + dbs.close() + + def test_apply_script_exception(self): + """Test apply_subtree_update returns 1 on script exception.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=False, remote='ci', + target='master') + + def run_git_handler(git_args): + if 'rev-parse' in git_args: + return 'first_parent\nsquash_hash' + if 'checkout' in git_args: + return '' + return '' + + with mock.patch.object(control, 'run_git', + side_effect=run_git_handler): + with mock.patch.object( + control.command, 'run', + side_effect=Exception('script failed')): + ret = control.apply_subtree_update( + dbs, 'us/next', 'dts', 'v6.15-dts', + 'merge_hash', args) + + self.assertEqual(ret, 1) + # Source should not be advanced + self.assertEqual(dbs.source_get('us/next'), 'base') + dbs.close() + + def test_apply_merge_conflict(self): + """Test apply_subtree_update aborts merge on non-zero exit.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=False, remote='ci', + target='master') + + merge_aborted = [False] + + def run_git_handler(git_args): + if 
'rev-parse' in git_args: + return 'first_parent\nsquash_hash' + if 'checkout' in git_args: + return '' + if 'merge' in git_args and '--abort' in git_args: + merge_aborted[0] = True + return '' + return '' + + mock_result = command.CommandResult( + '', 'CONFLICT (content): Merge conflict', '', 1) + with mock.patch.object(control, 'run_git', + side_effect=run_git_handler): + with mock.patch.object( + control.command, 'run', + return_value=mock_result): + ret = control.apply_subtree_update( + dbs, 'us/next', 'dts', 'v6.15-dts', + 'merge_hash', args) + + self.assertEqual(ret, 1) + self.assertTrue(merge_aborted[0]) + # Source should not be advanced + self.assertEqual(dbs.source_get('us/next'), 'base') + dbs.close() + + +class TestPrepareApplySubtreeUpdate(unittest.TestCase): + """Tests for prepare_apply handling of subtree updates.""" + + def setUp(self): + """Set up test fixtures.""" + fd, self.db_path = tempfile.mkstemp(suffix='.db') + os.close(fd) + os.unlink(self.db_path) + self.old_db_fname = control.DB_FNAME + control.DB_FNAME = self.db_path + database.Database.instances.clear() + + def tearDown(self): + """Clean up test fixtures.""" + control.DB_FNAME = self.old_db_fname + if os.path.exists(self.db_path): + os.unlink(self.db_path) + database.Database.instances.clear() + command.TEST_RESULT = None + + def test_prepare_apply_calls_subtree_update(self): + """Test prepare_apply applies subtree update and retries.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=False, remote='ci', + target='master') + subtree_info = control.NextCommitsInfo( + [], True, 'merge1', ('dts', 'v6.15-dts')) + normal_info = control.NextCommitsInfo([], False, None) + + call_count = [0] + + def mock_get_next(dbs_arg, source): + call_count[0] += 1 + if call_count[0] == 1: + return subtree_info, None + return normal_info, None + + with mock.patch.object(control, 
'get_next_commits', + side_effect=mock_get_next): + with mock.patch.object( + control, 'apply_subtree_update', + return_value=0) as mock_apply: + info, ret = control.prepare_apply( + dbs, 'us/next', None, args) + + # Should have called apply_subtree_update + mock_apply.assert_called_once_with( + dbs, 'us/next', 'dts', 'v6.15-dts', 'merge1', args) + # No commits after retry, so returns None/0 + self.assertIsNone(info) + self.assertEqual(ret, 0) + dbs.close() + + def test_prepare_apply_subtree_update_failure(self): + """Test prepare_apply returns error when subtree update fails.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + args = argparse.Namespace(push=False, remote='ci', + target='master') + subtree_info = control.NextCommitsInfo( + [], True, 'merge1', ('dts', 'v6.15-dts')) + + with mock.patch.object(control, 'get_next_commits', + return_value=(subtree_info, None)): + with mock.patch.object( + control, 'apply_subtree_update', + return_value=1): + info, ret = control.prepare_apply( + dbs, 'us/next', None, args) + + self.assertIsNone(info) + self.assertEqual(ret, 1) + dbs.close() + + def test_prepare_apply_subtree_without_args(self): + """Test prepare_apply returns error when subtree needs args=None.""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + dbs.source_set('us/next', 'base') + dbs.commit() + + subtree_info = control.NextCommitsInfo( + [], True, 'merge1', ('dts', 'v6.15-dts')) + + with mock.patch.object(control, 'get_next_commits', + return_value=(subtree_info, None)): + info, ret = control.prepare_apply( + dbs, 'us/next', None) + + self.assertIsNone(info) + self.assertEqual(ret, 1) + dbs.close() + + +class TestNextCommitsInfoDefault(unittest.TestCase): + """Tests for NextCommitsInfo subtree_update default value.""" + + def test_default_subtree_update_is_none(self): + """Test NextCommitsInfo defaults subtree_update to None.""" + 
info = control.NextCommitsInfo([], False, None) + self.assertIsNone(info.subtree_update) + + def test_explicit_subtree_update(self): + """Test NextCommitsInfo accepts explicit subtree_update.""" + info = control.NextCommitsInfo([], True, 'hash1', + ('dts', 'v6.15-dts')) + self.assertEqual(info.subtree_update, ('dts', 'v6.15-dts')) + + def test_explicit_none_subtree_update(self): + """Test NextCommitsInfo accepts explicit None subtree_update.""" + info = control.NextCommitsInfo([], False, None, None) + self.assertIsNone(info.subtree_update) + + +class TestSubtreeMergeRegex(unittest.TestCase): + """Tests for RE_SUBTREE_MERGE regex pattern.""" + + def test_matches_dts_merge(self): + """Test regex matches dts subtree merge subject.""" + subject = ("Subtree merge tag 'v6.15-dts' of " + "https://git.kernel.org/pub/scm/linux/kernel/git/" + "devicetree/devicetree-rebasing.git into dts/upstream") + match = control.RE_SUBTREE_MERGE.match(subject) + self.assertIsNotNone(match) + self.assertEqual(match.group(1), 'v6.15-dts') + self.assertEqual(match.group(2), 'dts/upstream') + + def test_matches_mbedtls_merge(self): + """Test regex matches mbedtls subtree merge subject.""" + subject = ("Subtree merge tag 'v3.6.2' of " + "https://github.com/Mbed-TLS/mbedtls.git into " + "lib/mbedtls/external/mbedtls") + match = control.RE_SUBTREE_MERGE.match(subject) + self.assertIsNotNone(match) + self.assertEqual(match.group(1), 'v3.6.2') + self.assertEqual(match.group(2), 'lib/mbedtls/external/mbedtls') + + def test_matches_lwip_merge(self): + """Test regex matches lwip subtree merge subject.""" + subject = ("Subtree merge tag 'STABLE-2_2_0' of " + "https://git.savannah.gnu.org/git/lwip.git into " + "lib/lwip/lwip") + match = control.RE_SUBTREE_MERGE.match(subject) + self.assertIsNotNone(match) + self.assertEqual(match.group(1), 'STABLE-2_2_0') + self.assertEqual(match.group(2), 'lib/lwip/lwip') + + def test_no_match_normal_merge(self): + """Test regex does not match normal merge 
subjects.""" + subject = "Merge branch 'feature-xyz' into main" + match = control.RE_SUBTREE_MERGE.match(subject) + self.assertIsNone(match) + + def test_no_match_squash_commit(self): + """Test regex does not match subtree squash commits.""" + subject = ("Squashed 'dts/upstream/' changes from " + "v6.14-dts..v6.15-dts") + match = control.RE_SUBTREE_MERGE.match(subject) + self.assertIsNone(match) + + class TestDoCommitSourceResolveError(unittest.TestCase): """Tests for do_commit_source error handling.""" -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> The agent-message-streaming pattern (async iteration, text extraction and conversation-log collection) is duplicated in run() and run_review_agent() Extract it into a shared run_agent_collect() helper. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/agent.py | 55 ++++++++++++++++++++----------------- tools/pickman/ftest.py | 62 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 25 deletions(-) diff --git a/tools/pickman/agent.py b/tools/pickman/agent.py index 85f8efee1df..63952c1c005 100644 --- a/tools/pickman/agent.py +++ b/tools/pickman/agent.py @@ -55,6 +55,34 @@ def check_available(): return True +async def run_agent_collect(prompt, options): + """Run a Claude agent and collect its conversation log + + Sends the prompt to a Claude agent, streams output to stdout and + collects all text blocks into a conversation log. + + Args: + prompt (str): The prompt to send to the agent + options (ClaudeAgentOptions): Agent configuration + + Returns: + tuple: (success, conversation_log) where success is bool and + conversation_log is the agent's output text + """ + conversation_log = [] + try: + async for message in query(prompt=prompt, options=options): + if hasattr(message, 'content'): + for block in message.content: + if hasattr(block, 'text'): + print(block.text) + conversation_log.append(block.text) + return True, '\n\n'.join(conversation_log) + except (RuntimeError, ValueError, OSError) as exc: + tout.error(f'Agent failed: {exc}') + return False, '\n\n'.join(conversation_log) + + def is_qconfig_commit(subject): """Check if a commit subject indicates a qconfig resync commit @@ -228,19 +256,7 @@ this means the series was already applied via a different path. 
In this case: tout.info(f'Starting Claude agent to cherry-pick {len(commits)} commits...') tout.info('') - conversation_log = [] - try: - async for message in query(prompt=prompt, options=options): - # Print agent output and capture it - if hasattr(message, 'content'): - for block in message.content: - if hasattr(block, 'text'): - print(block.text) - conversation_log.append(block.text) - return True, '\n\n'.join(conversation_log) - except (RuntimeError, ValueError, OSError) as exc: - tout.error(f'Agent failed: {exc}') - return False, '\n\n'.join(conversation_log) + return await run_agent_collect(prompt, options) def read_signal_file(repo_path=None): @@ -492,18 +508,7 @@ async def run_review_agent(mr_iid, branch_name, comments, remote, tout.info(f'Starting Claude agent to {task_desc}...') tout.info('') - conversation_log = [] - try: - async for message in query(prompt=prompt, options=options): - if hasattr(message, 'content'): - for block in message.content: - if hasattr(block, 'text'): - print(block.text) - conversation_log.append(block.text) - return True, '\n\n'.join(conversation_log) - except (RuntimeError, ValueError, OSError) as exc: - tout.error(f'Agent failed: {exc}') - return False, '\n\n'.join(conversation_log) + return await run_agent_collect(prompt, options) # pylint: disable=too-many-arguments diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index de6bce40614..42ce05962e2 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -6,6 +6,7 @@ # pylint: disable=too-many-lines """Tests for pickman.""" +import asyncio import argparse import os import shutil @@ -2971,6 +2972,67 @@ class TestExecuteApply(unittest.TestCase): dbs.close() +class TestRunAgentCollect(unittest.TestCase): + """Tests for run_agent_collect function.""" + + def test_success(self): + """Test successful agent run collects text blocks.""" + block1 = mock.MagicMock() + block1.text = 'hello' + block2 = mock.MagicMock() + block2.text = 'world' + msg = mock.MagicMock() + 
msg.content = [block1, block2] + + async def fake_query(**kwargs): + yield msg + + with mock.patch.object(agent, 'query', fake_query, create=True): + with terminal.capture(): + opts = mock.MagicMock() + success, log = asyncio.run( + agent.run_agent_collect('prompt', opts)) + + self.assertTrue(success) + self.assertEqual(log, 'hello\n\nworld') + + def test_failure(self): + """Test agent failure returns False with partial log.""" + block = mock.MagicMock() + block.text = 'partial' + msg = mock.MagicMock() + msg.content = [block] + + async def fake_query(**kwargs): + yield msg + raise RuntimeError('agent crashed') + + with mock.patch.object(agent, 'query', fake_query, create=True): + with terminal.capture(): + opts = mock.MagicMock() + success, log = asyncio.run( + agent.run_agent_collect('prompt', opts)) + + self.assertFalse(success) + self.assertEqual(log, 'partial') + + def test_no_content(self): + """Test messages without content are skipped.""" + msg = mock.MagicMock(spec=[]) + + async def fake_query(**kwargs): + yield msg + + with mock.patch.object(agent, 'query', fake_query, create=True): + with terminal.capture(): + opts = mock.MagicMock() + success, log = asyncio.run( + agent.run_agent_collect('prompt', opts)) + + self.assertTrue(success) + self.assertEqual(log, '') + + class TestSignalFile(unittest.TestCase): """Tests for signal file handling.""" -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> Replace manual open() calls with tools.read_file() and tools.write_file() for consistency with the rest of the codebase. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/agent.py | 5 +++-- tools/pickman/control.py | 13 +++++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/tools/pickman/agent.py b/tools/pickman/agent.py index 63952c1c005..4b914e314e8 100644 --- a/tools/pickman/agent.py +++ b/tools/pickman/agent.py @@ -14,6 +14,7 @@ our_path = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(our_path, '..')) # pylint: disable=wrong-import-position,import-error +from u_boot_pylib import tools from u_boot_pylib import tout # Signal file for agent to communicate status back to pickman @@ -278,8 +279,8 @@ def read_signal_file(repo_path=None): return None, None try: - with open(signal_path, 'r', encoding='utf-8') as fhandle: - lines = fhandle.read().strip().split('\n') + data = tools.read_file(signal_path, binary=False) + lines = data.strip().split('\n') status = lines[0] if lines else None last_commit = lines[1] if len(lines) > 1 else None diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 48f7ced1617..057e4400adb 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -26,6 +26,7 @@ from pickman import ftest from pickman import gitlab_api from u_boot_pylib import command from u_boot_pylib import terminal +from u_boot_pylib import tools from u_boot_pylib import tout # Default database filename @@ -1458,8 +1459,7 @@ def get_history(fname, source, commits, branch_name, conv_log): # Read existing content existing = '' if os.path.exists(fname): - with open(fname, 'r', encoding='utf-8') as fhandle: - existing = fhandle.read() + existing = tools.read_file(fname, binary=False) # Remove existing entry for this branch (from ## header to ---) pattern = rf'## 
[^\n]+\n\nBranch: {re.escape(branch_name)}\n.*?---\n\n' existing = re.sub(pattern, '', existing, flags=re.DOTALL) @@ -1467,8 +1467,7 @@ def get_history(fname, source, commits, branch_name, conv_log): content = existing + entry # Write updated history file - with open(fname, 'w', encoding='utf-8') as fhandle: - fhandle.write(content) + tools.write_file(fname, content, binary=False) # Generate commit message commit_msg = (f'pickman: Record cherry-pick of {len(commits)} commits ' @@ -2429,11 +2428,9 @@ Comments addressed: # Append to history file existing = '' if os.path.exists(HISTORY_FILE): - with open(HISTORY_FILE, 'r', encoding='utf-8') as fhandle: - existing = fhandle.read() + existing = tools.read_file(HISTORY_FILE, binary=False) - with open(HISTORY_FILE, 'w', encoding='utf-8') as fhandle: - fhandle.write(existing + entry) + tools.write_file(HISTORY_FILE, existing + entry, binary=False) # Commit the history file run_git(['add', '-f', HISTORY_FILE]) -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> Replace manual open() calls with tools.read_file() and tools.write_file() in the test file for consistency. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/ftest.py | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index 42ce05962e2..4a58a9371ce 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -23,6 +23,7 @@ sys.path.insert(0, os.path.join(our_path, '..')) # pylint: disable=wrong-import-position,import-error,cyclic-import from u_boot_pylib import command from u_boot_pylib import terminal +from u_boot_pylib import tools from u_boot_pylib import tout from pickman import __main__ as pickman @@ -1634,8 +1635,9 @@ class TestConfigFile(unittest.TestCase): def test_get_token_from_config(self): """Test getting token from config file.""" - with open(self.config_file, 'w', encoding='utf-8') as fhandle: - fhandle.write('[gitlab]\ntoken = test-config-token\n') + tools.write_file(self.config_file, + '[gitlab]\ntoken = test-config-token\n', + binary=False) with mock.patch.object(gitlab, 'CONFIG_FILE', self.config_file): token = gitlab.get_token() @@ -1651,8 +1653,8 @@ class TestConfigFile(unittest.TestCase): def test_get_token_config_missing_section(self): """Test config file without gitlab section.""" - with open(self.config_file, 'w', encoding='utf-8') as fhandle: - fhandle.write('[other]\nkey = value\n') + tools.write_file(self.config_file, '[other]\nkey = value\n', + binary=False) with mock.patch.object(gitlab, 'CONFIG_FILE', self.config_file): with mock.patch.dict(os.environ, {'GITLAB_TOKEN': 'env-token'}): @@ -1661,8 +1663,8 @@ class TestConfigFile(unittest.TestCase): def test_get_config_value(self): """Test get_config_value function.""" - with open(self.config_file, 'w', encoding='utf-8') as fhandle: - 
fhandle.write('[section1]\nkey1 = value1\n') + tools.write_file(self.config_file, '[section1]\nkey1 = value1\n', + binary=False) with mock.patch.object(gitlab, 'CONFIG_FILE', self.config_file): value = gitlab.get_config_value('section1', 'key1') @@ -2149,8 +2151,7 @@ class TestUpdateHistoryWithReview(unittest.TestCase): # Check history file was created self.assertTrue(os.path.exists(control.HISTORY_FILE)) - with open(control.HISTORY_FILE, 'r', encoding='utf-8') as fhandle: - content = fhandle.read() + content = tools.read_file(control.HISTORY_FILE, binary=False) self.assertIn('### Review:', content) self.assertIn('Branch: cherry-abc123', content) @@ -2161,8 +2162,8 @@ class TestUpdateHistoryWithReview(unittest.TestCase): def test_update_history_appends(self): """Test that review handling appends to existing history.""" # Create existing history - with open(control.HISTORY_FILE, 'w', encoding='utf-8') as fhandle: - fhandle.write('Existing history content\n') + tools.write_file(control.HISTORY_FILE, + 'Existing history content\n', binary=False) subprocess.run(['git', 'add', control.HISTORY_FILE], check=True, capture_output=True) subprocess.run(['git', 'commit', '-m', 'Initial'], @@ -2173,8 +2174,7 @@ class TestUpdateHistoryWithReview(unittest.TestCase): resolved=False)] control.update_history('cherry-xyz', comms, 'Fixed it') - with open(control.HISTORY_FILE, 'r', encoding='utf-8') as fhandle: - content = fhandle.read() + content = tools.read_file(control.HISTORY_FILE, binary=False) self.assertIn('Existing history content', content) self.assertIn('### Review:', content) @@ -2496,15 +2496,14 @@ class TestGetHistory(unittest.TestCase): self.assertIn('- aaa111a First commit', commit_msg) # Verify file was written - with open(self.history_file, 'r', encoding='utf-8') as fhandle: - file_content = fhandle.read() + file_content = tools.read_file(self.history_file, binary=False) self.assertEqual(file_content, content) def test_get_history_with_existing(self): """Test 
get_history appends to existing content.""" # Create existing file - with open(self.history_file, 'w', encoding='utf-8') as fhandle: - fhandle.write('Previous history content\n') + tools.write_file(self.history_file, + 'Previous history content\n', binary=False) commits = [ control.CommitInfo('bbb222', 'bbb222b', 'New commit', 'Author 2'), @@ -2535,8 +2534,7 @@ Old conversation Other content """ - with open(self.history_file, 'w', encoding='utf-8') as fhandle: - fhandle.write(existing) + tools.write_file(self.history_file, existing, binary=False) commits = [ control.CommitInfo('ccc333', 'ccc333c', 'Updated commit', 'Author'), @@ -3055,8 +3053,8 @@ class TestSignalFile(unittest.TestCase): def test_read_signal_file_already_applied(self): """Test read_signal_file with already_applied status.""" - with open(self.signal_path, 'w', encoding='utf-8') as fhandle: - fhandle.write('already_applied\nabc123def456\n') + tools.write_file(self.signal_path, + 'already_applied\nabc123def456\n', binary=False) status, commit = agent.read_signal_file(self.test_dir) self.assertEqual(status, 'already_applied') @@ -3067,8 +3065,7 @@ class TestSignalFile(unittest.TestCase): def test_read_signal_file_status_only(self): """Test read_signal_file with only status line.""" - with open(self.signal_path, 'w', encoding='utf-8') as fhandle: - fhandle.write('conflict\n') + tools.write_file(self.signal_path, 'conflict\n', binary=False) status, commit = agent.read_signal_file(self.test_dir) self.assertEqual(status, 'conflict') -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> Add a pipeline_fix table (schema v4) for tracking pipeline-fix attempts per MR. Each row records the MR IID, pipeline ID, attempt number, status and timestamp. A UNIQUE constraint on (mr_iid, pipeline_id) ensures each pipeline is only processed once. Add pfix_count(), pfix_add() and pfix_has() accessors. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/database.py | 65 +++++++++++++++++++++++++++++- tools/pickman/ftest.py | 84 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+), 1 deletion(-) diff --git a/tools/pickman/database.py b/tools/pickman/database.py index 92bff7a5702..317668a979d 100644 --- a/tools/pickman/database.py +++ b/tools/pickman/database.py @@ -19,7 +19,7 @@ from u_boot_pylib import tools from u_boot_pylib import tout # Schema version (version 0 means there is no database yet) -LATEST = 3 +LATEST = 4 # Default database filename DB_FNAME = '.pickman.db' @@ -141,6 +141,19 @@ class Database: # pylint: disable=too-many-public-methods 'processed_at TEXT, ' 'UNIQUE(mr_iid, comment_id))') + def _create_v4(self): + """Migrate database to v4 schema - add pipeline_fix table""" + # Table for tracking pipeline fix attempts per MR + self.cur.execute( + 'CREATE TABLE pipeline_fix (' + 'id INTEGER PRIMARY KEY AUTOINCREMENT, ' + 'mr_iid INTEGER, ' + 'pipeline_id INTEGER, ' + 'attempt INTEGER, ' + 'status TEXT, ' + 'created_at TEXT, ' + 'UNIQUE(mr_iid, pipeline_id))') + def migrate_to(self, dest_version): """Migrate the database to the selected version @@ -165,6 +178,8 @@ class Database: # pylint: disable=too-many-public-methods self._create_v2() elif version == 3: self._create_v3() + elif version == 4: + self._create_v4() self.cur.execute('DELETE FROM schema_version') self.cur.execute( @@ -481,3 +496,51 @@ class Database: # pylint: disable=too-many-public-methods 'SELECT comment_id FROM comment WHERE mr_iid = ?', 
(mr_iid,)) return [row[0] for row in res.fetchall()] + + # pipeline_fix functions + + def pfix_count(self, mr_iid): + """Count fix attempts for an MR + + Args: + mr_iid (int): Merge request IID + + Return: + int: Number of fix attempts + """ + res = self.execute( + 'SELECT COUNT(*) FROM pipeline_fix WHERE mr_iid = ?', + (mr_iid,)) + return res.fetchone()[0] + + def pfix_add(self, mr_iid, pipeline_id, attempt, status): + """Record a pipeline fix attempt + + Args: + mr_iid (int): Merge request IID + pipeline_id (int): Pipeline ID + attempt (int): Attempt number + status (str): Status ('success' or 'failure') + """ + self.execute( + 'INSERT OR IGNORE INTO pipeline_fix ' + '(mr_iid, pipeline_id, attempt, status, created_at) ' + 'VALUES (?, ?, ?, ?, ?)', + (mr_iid, pipeline_id, attempt, status, + datetime.now().isoformat())) + + def pfix_has(self, mr_iid, pipeline_id): + """Check if a pipeline has already been handled + + Args: + mr_iid (int): Merge request IID + pipeline_id (int): Pipeline ID + + Return: + bool: True if already handled + """ + res = self.execute( + 'SELECT id FROM pipeline_fix ' + 'WHERE mr_iid = ? 
AND pipeline_id = ?', + (mr_iid, pipeline_id)) + return res.fetchone() is not None diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index 4a58a9371ce..67a7d004ca6 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -818,6 +818,90 @@ class TestDatabaseComment(unittest.TestCase): dbs.close() +class TestDatabasePipelineFix(unittest.TestCase): + """Tests for Database pipeline_fix functions.""" + + def setUp(self): + """Set up test fixtures.""" + fd, self.db_path = tempfile.mkstemp(suffix='.db') + os.close(fd) + os.unlink(self.db_path) + + def tearDown(self): + """Clean up test fixtures.""" + if os.path.exists(self.db_path): + os.unlink(self.db_path) + database.Database.instances.clear() + + def test_pfix_add(self): + """Test adding a pipeline fix record""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + dbs.pfix_add(123, 456, 1, 'success') + dbs.commit() + + self.assertTrue(dbs.pfix_has(123, 456)) + + dbs.close() + + def test_pfix_count(self): + """Test counting pipeline fix attempts""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + self.assertEqual(dbs.pfix_count(123), 0) + + dbs.pfix_add(123, 100, 1, 'failure') + dbs.pfix_add(123, 200, 2, 'success') + dbs.commit() + + self.assertEqual(dbs.pfix_count(123), 2) + # Different MR should have 0 + self.assertEqual(dbs.pfix_count(999), 0) + + dbs.close() + + def test_pfix_has(self): + """Test checking if a pipeline was already handled""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + self.assertFalse(dbs.pfix_has(123, 456)) + + dbs.pfix_add(123, 456, 1, 'success') + dbs.commit() + + self.assertTrue(dbs.pfix_has(123, 456)) + # Different pipeline should not be handled + self.assertFalse(dbs.pfix_has(123, 789)) + # Different MR should not be handled + self.assertFalse(dbs.pfix_has(999, 456)) + + dbs.close() + + def test_pfix_unique(self): + """Test that duplicate mr_iid/pipeline_id pairs 
are ignored""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + dbs.pfix_add(123, 456, 1, 'failure') + dbs.commit() + + # Adding same pair again should not raise (OR IGNORE) + dbs.pfix_add(123, 456, 2, 'success') + dbs.commit() + + # Count should still be 1 (second insert ignored) + self.assertEqual(dbs.pfix_count(123), 1) + + dbs.close() + + class TestListSources(unittest.TestCase): """Tests for list-sources command.""" -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> Add PickmanMr pipeline_status/pipeline_id fields (extracted from the head_pipeline attribute), PipelineInfo and FailedJob named tuples, and get_failed_jobs() which fetches the trace logs of failed jobs from a GitLab pipeline. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/gitlab_api.py | 77 ++++++++++++++++++++++++++++++++++++- 1 file changed, 75 insertions(+), 2 deletions(-) diff --git a/tools/pickman/gitlab_api.py b/tools/pickman/gitlab_api.py index 9262ac15a0d..d9635d392ee 100644 --- a/tools/pickman/gitlab_api.py +++ b/tools/pickman/gitlab_api.py @@ -42,14 +42,21 @@ class MrCreateError(Exception): # Use defaults for new fields so existing code doesn't break PickmanMr = namedtuple('PickmanMr', [ 'iid', 'title', 'web_url', 'source_branch', 'description', - 'has_conflicts', 'needs_rebase' -], defaults=[False, False]) + 'has_conflicts', 'needs_rebase', 'pipeline_status', 'pipeline_id' +], defaults=[False, False, None, None]) # Comment info returned by get_mr_comments() MrComment = namedtuple('MrComment', [ 'id', 'author', 'body', 'created_at', 'resolvable', 'resolved' ]) +# Pipeline info +PipelineInfo = namedtuple('PipelineInfo', ['id', 'status', 'web_url']) + +# Failed job info from a pipeline +FailedJob = namedtuple('FailedJob', + ['id', 'name', 'stage', 'web_url', 'log_tail']) + def check_available(): """Check if the python-gitlab module is available @@ -320,6 +327,9 @@ def get_pickman_mrs(remote, state='opened'): # For open MRs, fetch full details since list() doesn't # include accurate merge status fields + pipeline_status = None + pipeline_id = None + if state == 'opened': full_mr = project.mergerequests.get(merge_req.iid) has_conflicts = getattr(full_mr, 'has_conflicts', False) @@ -333,6 +343,12 @@ def get_pickman_mrs(remote, state='opened'): diverged = getattr(full_mr, 'diverged_commits_count', 0) needs_rebase = diverged and 
diverged > 0 + # Extract pipeline info from head_pipeline + head_pipeline = getattr(full_mr, 'head_pipeline', None) + if head_pipeline: + pipeline_status = head_pipeline.get('status') + pipeline_id = head_pipeline.get('id') + pickman_mrs.append(PickmanMr( iid=merge_req.iid, title=merge_req.title, @@ -341,6 +357,8 @@ def get_pickman_mrs(remote, state='opened'): description=merge_req.description or '', has_conflicts=has_conflicts, needs_rebase=needs_rebase, + pipeline_status=pipeline_status, + pipeline_id=pipeline_id, )) return pickman_mrs except gitlab.exceptions.GitlabError as exc: @@ -423,6 +441,61 @@ def get_mr_comments(remote, mr_iid): return None +def get_failed_jobs(remote, pipeline_id, max_log_lines=200): + """Get failed jobs from a pipeline + + Args: + remote (str): Remote name + pipeline_id (int): Pipeline ID + max_log_lines (int): Maximum log lines to fetch per job + + Returns: + list: List of FailedJob tuples, or None on failure + """ + if not check_available(): + return None + + token = get_token() + if not token: + tout.error('GITLAB_TOKEN environment variable not set') + return None + + remote_url = get_remote_url(remote) + host, proj_path = parse_url(remote_url) + + if not host or not proj_path: + return None + + try: + glab = gitlab.Gitlab(f'https://{host}', private_token=token) + project = glab.projects.get(proj_path) + pipeline = project.pipelines.get(pipeline_id) + jobs = pipeline.jobs.list(scope='failed', get_all=True) + + failed_jobs = [] + for job in jobs: + # Fetch full job to get trace + full_job = project.jobs.get(job.id) + try: + trace = full_job.trace().decode('utf-8', errors='replace') + lines = trace.splitlines() + log_tail = '\n'.join(lines[-max_log_lines:]) + except (AttributeError, gitlab.exceptions.GitlabError): + log_tail = '' + + failed_jobs.append(FailedJob( + id=job.id, + name=job.name, + stage=job.stage, + web_url=job.web_url, + log_tail=log_tail, + )) + return failed_jobs + except gitlab.exceptions.GitlabError as exc: + 
tout.error(f'GitLab API error: {exc}') + return None + + def reply_to_mr(remote, mr_iid, message): """Post a reply to a merge request -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> After pickman pushes cherry-pick MRs to GitLab, CI pipelines sometimes fail due to build or test errors introduced by the cherry-picks. This currently requires manual intervention. Add a pipeline-fix feature that detects failed pipelines on open MRs and uses a Claude agent to diagnose and fix them. The agent analyses the failed job logs, identifies the responsible commit, amends it via interactive rebase (using uman's rf/rn helpers), verifies the fix with um build and buildman, then leaves the result on a local branch for the caller to push. The feature integrates into the existing do_step()/do_poll() flow, running after review comment processing. A --fix-retries/-F flag (default 3, 0 to disable) controls the maximum attempts per MR. Each attempt is tracked per pipeline ID in the pipeline_fix table so the same failure is never reprocessed, and a new pipeline from a rebase is treated independently. On success, pickman pushes the fix branch, posts an MR comment summarising what was fixed, updates the MR description with the full agent log, and records the fix in .pickman-history. On failure or when the retry limit is reached, a comment is posted requesting manual intervention. 
Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/README.rst | 48 ++++ tools/pickman/__main__.py | 6 + tools/pickman/agent.py | 177 +++++++++++++ tools/pickman/control.py | 247 ++++++++++++++++++ tools/pickman/ftest.py | 536 +++++++++++++++++++++++++++++++++++++- 5 files changed, 1011 insertions(+), 3 deletions(-) diff --git a/tools/pickman/README.rst b/tools/pickman/README.rst index 70309032838..0c9d2d1521b 100644 --- a/tools/pickman/README.rst +++ b/tools/pickman/README.rst @@ -212,6 +212,39 @@ This ensures: - No manual intervention is required to continue - False positives are minimized by comparing actual patch content +Pipeline Fix +------------ + +When a CI pipeline fails on a pickman MR, the ``step`` and ``poll`` commands +can automatically diagnose and fix the failure using a Claude agent. This is +useful when cherry-picks introduce build or test failures that need minor +adjustments. + +**How it works** + +During each step, after processing review comments, pickman checks active MRs +for failed pipelines. For each failed pipeline: + +1. Pickman fetches the failed job logs from GitLab +2. A Claude agent analyses the logs, diagnoses the root cause, and makes + targeted fixes +3. The fix is pushed to the MR branch, triggering a new pipeline +4. The attempt is recorded in the database to avoid reprocessing + +**Retry behaviour** + +Each MR gets up to ``--fix-retries`` attempts (default: 3). If the limit is +reached, pickman posts a comment on the MR indicating that manual intervention +is required. Set ``--fix-retries 0`` to disable automatic pipeline fixing. + +Each attempt is tracked per pipeline ID, so a new pipeline triggered by a rebase +or comment fix is treated independently. + +**Options** + +- ``-F, --fix-retries``: Maximum pipeline-fix attempts per MR (default: 3, 0 to + disable). Available on both ``step`` and ``poll`` commands. 
+ CI Pipelines ------------ @@ -448,6 +481,7 @@ review comments are handled automatically. Options for the step command: +- ``-F, --fix-retries``: Max pipeline-fix attempts per MR (default: 3, 0 to disable) - ``-m, --max-mrs``: Maximum open MRs allowed (default: 5) - ``-r, --remote``: Git remote for push (default: ci) - ``-t, --target``: Target branch for MR (default: master) @@ -461,6 +495,7 @@ creating new MRs as previous ones are merged. Press Ctrl+C to stop. Options for the poll command: +- ``-F, --fix-retries``: Max pipeline-fix attempts per MR (default: 3, 0 to disable) - ``-i, --interval``: Interval between steps in seconds (default: 300) - ``-m, --max-mrs``: Maximum open MRs allowed (default: 5) - ``-r, --remote``: Git remote for push (default: ci) @@ -563,6 +598,19 @@ Tables This table prevents the same comment from being addressed multiple times when running ``review`` or ``poll`` commands. +**pipeline_fix** + Tracks pipeline fix attempts per MR to avoid reprocessing. + + - ``id``: Primary key + - ``mr_iid``: GitLab merge request IID + - ``pipeline_id``: GitLab pipeline ID + - ``attempt``: Attempt number + - ``status``: Result ('success', 'failure', 'skipped', 'no_jobs') + - ``created_at``: Timestamp when the attempt was made + + The ``(mr_iid, pipeline_id)`` pair is unique, so each pipeline is only + processed once. 
+ Configuration ------------- diff --git a/tools/pickman/__main__.py b/tools/pickman/__main__.py index 7814fd0fedc..1e13646ec52 100755 --- a/tools/pickman/__main__.py +++ b/tools/pickman/__main__.py @@ -113,6 +113,9 @@ def add_main_commands(subparsers): step_cmd = subparsers.add_parser('step', help='Create MR if none pending') step_cmd.add_argument('source', help='Source branch name') + step_cmd.add_argument('-F', '--fix-retries', type=int, default=3, + help='Max pipeline-fix attempts per MR ' + '(0 to disable, default: 3)') step_cmd.add_argument('-m', '--max-mrs', type=int, default=5, help='Max open MRs allowed (default: 5)') step_cmd.add_argument('-r', '--remote', default='ci', @@ -123,6 +126,9 @@ def add_main_commands(subparsers): poll_cmd = subparsers.add_parser('poll', help='Run step repeatedly until stopped') poll_cmd.add_argument('source', help='Source branch name') + poll_cmd.add_argument('-F', '--fix-retries', type=int, default=3, + help='Max pipeline-fix attempts per MR ' + '(0 to disable, default: 3)') poll_cmd.add_argument('-i', '--interval', type=int, default=300, help='Interval between steps in seconds ' '(default: 300)') diff --git a/tools/pickman/agent.py b/tools/pickman/agent.py index 4b914e314e8..ec5c1b0df75 100644 --- a/tools/pickman/agent.py +++ b/tools/pickman/agent.py @@ -536,3 +536,180 @@ def handle_mr_comments(mr_iid, branch_name, comments, remote, target='master', return asyncio.run(run_review_agent(mr_iid, branch_name, comments, remote, target, needs_rebase, has_conflicts, mr_description, repo_path)) + + +# pylint: disable=too-many-arguments +def build_pipeline_fix_prompt(mr_iid, branch_name, failed_jobs, remote, + target, mr_description, attempt): + """Build prompt and task description for the pipeline fix agent + + Args: + mr_iid (int): Merge request IID + branch_name (str): Source branch name + failed_jobs (list): List of FailedJob tuples + remote (str): Git remote name + target (str): Target branch + mr_description (str): MR 
description with context + attempt (int): Fix attempt number + + Returns: + tuple: (prompt, task_desc) where prompt is the full agent prompt and + task_desc is a short description + """ + task_desc = f'fix {len(failed_jobs)} failed pipeline job(s) (attempt {attempt})' + + # Format failed jobs + job_sections = [] + for job in failed_jobs: + job_sections.append( + f'### Job: {job.name} (stage: {job.stage})\n' + f'URL: {job.web_url}\n' + f'Log tail:\n```\n{job.log_tail}\n```' + ) + jobs_text = '\n\n'.join(job_sections) + + # Include MR description for context + context_section = '' + if mr_description: + context_section = f''' +Context from MR description: + +{mr_description} +''' + + # Extract board names from failed job names for targeted builds. + # CI job names typically contain a board name (e.g. 'build:sandbox', + # 'test:venice_gw7905', 'world build <board>'). Collect unique names + # to pass to buildman so the agent can verify all affected boards. + board_names = set() + for job in failed_jobs: + # Try common CI patterns: 'build:<board>', 'test:<board>', + # or a board name token in the job name + for part in job.name.replace(':', ' ').replace('/', ' ').split(): + # Skip generic tokens that are not board names + if part.lower() in ('build', 'test', 'world', 'check', 'lint', + 'ci', 'job', 'stage'): + continue + board_names.add(part) + + # Always include sandbox for a basic sanity check + board_names.add('sandbox') + boards_csv = ','.join(sorted(board_names)) + + prompt = f"""Fix pipeline failures for merge request !{mr_iid} \ +(branch: {branch_name}, attempt {attempt}). +{context_section} +Failed jobs: + +{jobs_text} + +Steps to follow: +1. Checkout the branch: git checkout {branch_name} +2. Diagnose the root cause from the job logs above +3. 
Identify which commit introduced the problem: + - Use 'git log --oneline' to list the commits on the branch + - Correlate the failing file/symbol with the commit that touched it + - Use 'git log --oneline -- <file>' if needed +4. Apply the fix to the appropriate commit: + - If you can identify the responsible commit, use uman's rebase helpers + to amend it: + a) 'rf N' to start an interactive rebase going back N commits from + HEAD, stopping at the oldest (first) commit in the range + b) Make your fix, then amend the commit with a 1-2 line note appended + to the end of the commit message describing the fix, e.g.: + git add <files> + git commit --amend -m "$(git log -1 --format=%B) + + [pickman] Fix <short description of what was fixed>" + c) 'rn' to advance to the next commit (or 'git rebase --continue' + to finish) + - If the cause spans multiple commits or cannot be pinpointed, add a new + fixup commit on top of the branch +5. Build and verify: + a) Quick sandbox check: um build sandbox + b) Build all affected boards: \ +buildman -o /tmp/pickman {boards_csv} + Fix any build errors before proceeding. +6. Create a local branch: {branch_name}-fix{attempt} +7. Report what was fixed: which commit was responsible, what the root cause + was, and what change was made. Do NOT push the branch; the caller + handles that. + +Important: +- Keep changes minimal and focused on fixing the failures +- Prefer amending the responsible commit over adding a new commit, so the + MR history stays clean +- If the failure is an infrastructure or transient issue (network timeout, \ +runner problem, etc.), report this without making changes +- Do not modify unrelated code +- Use 'um build sandbox' for sandbox builds (fast, local) +- Use 'buildman -o /tmp/pickman <board1> <board2> ...' 
to build multiple + boards in one go +- Leave the result on local branch {branch_name}-fix{attempt} +""" + + return prompt, task_desc + + +async def run_pipeline_fix_agent(mr_iid, branch_name, failed_jobs, remote, + target='master', mr_description='', + attempt=1, repo_path=None): + """Run the Claude agent to fix pipeline failures + + Args: + mr_iid (int): Merge request IID + branch_name (str): Source branch name + failed_jobs (list): List of FailedJob tuples + remote (str): Git remote name + target (str): Target branch + mr_description (str): MR description with context + attempt (int): Fix attempt number + repo_path (str): Path to repository (defaults to current directory) + + Returns: + tuple: (success, conversation_log) where success is bool and + conversation_log is the agent's output text + """ + if not check_available(): + return False, '' + + if repo_path is None: + repo_path = os.getcwd() + + prompt, task_desc = build_pipeline_fix_prompt( + mr_iid, branch_name, failed_jobs, remote, target, + mr_description, attempt) + + options = ClaudeAgentOptions( + allowed_tools=['Bash', 'Read', 'Grep', 'Edit', 'Write'], + cwd=repo_path, + max_buffer_size=MAX_BUFFER_SIZE, + ) + + tout.info(f'Starting Claude agent to {task_desc}...') + tout.info('') + + return await run_agent_collect(prompt, options) + + +def fix_pipeline(mr_iid, branch_name, failed_jobs, remote, target='master', + mr_description='', attempt=1, repo_path=None): + """Synchronous wrapper for running the pipeline fix agent + + Args: + mr_iid (int): Merge request IID + branch_name (str): Source branch name + failed_jobs (list): List of FailedJob tuples + remote (str): Git remote name + target (str): Target branch + mr_description (str): MR description with context + attempt (int): Fix attempt number + repo_path (str): Path to repository (defaults to current directory) + + Returns: + tuple: (success, conversation_log) where success is bool and + conversation_log is the agent's output text + """ + return 
asyncio.run(run_pipeline_fix_agent( + mr_iid, branch_name, failed_jobs, remote, target, + mr_description, attempt, repo_path)) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index 057e4400adb..f4d4a43c292 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -2398,6 +2398,248 @@ def process_mr_reviews(remote, mrs, dbs, target='master'): return processed +def _rebase_mr_branch(remote, merge_req, dbs, target): + """Rebase an MR branch onto the target before attempting a pipeline fix + + When a branch needs rebasing, the pipeline failure may be caused by the + stale base rather than by the cherry-picked commits. Rebasing and pushing + triggers a fresh pipeline run. + + Args: + remote (str): Remote name + merge_req (PickmanMr): MR with a failed pipeline + dbs (Database): Database instance for tracking fix attempts + target (str): Target branch + + Returns: + True if the branch was rebased and pushed, False if the rebase + failed (conflicts), or None if no rebase is needed + """ + if not merge_req.needs_rebase and not merge_req.has_conflicts: + return None + + mr_iid = merge_req.iid + branch = merge_req.source_branch + if merge_req.has_conflicts: + tout.info(f'MR !{mr_iid}: has conflicts, rebasing before ' + f'pipeline fix...') + else: + tout.info(f'MR !{mr_iid}: needs rebase, rebasing before ' + f'pipeline fix...') + run_git(['checkout', branch]) + try: + run_git(['rebase', f'{remote}/{target}']) + except command.CommandExc: + tout.warning(f'MR !{mr_iid}: rebase failed, aborting') + try: + run_git(['rebase', '--abort']) + except command.CommandExc: + pass + return False + gitlab_api.push_branch(remote, branch, force=True, skip_ci=False) + dbs.pfix_add(mr_iid, merge_req.pipeline_id, 0, 'rebased') + dbs.commit() + tout.info(f'MR !{mr_iid}: rebased and pushed, waiting for ' + f'new pipeline') + return True + + +def _attempt_pipeline_fix(remote, merge_req, dbs, target, attempt): + """Run the agent to fix a failed pipeline and report the 
result + + Fetches the failed-job logs, invokes the fix agent, then pushes the + result and updates the MR description and history on success, or posts + a failure comment otherwise. + + Args: + remote (str): Remote name + merge_req (PickmanMr): MR with a failed pipeline + dbs (Database): Database instance for tracking fix attempts + target (str): Target branch + attempt (int): Current fix attempt number + + Returns: + bool: True if the fix was attempted, False if no failed jobs + were found + """ + mr_iid = merge_req.iid + + # Fetch failed jobs + failed_jobs = gitlab_api.get_failed_jobs(remote, merge_req.pipeline_id) + if not failed_jobs: + tout.info(f'MR !{mr_iid}: no failed jobs found') + dbs.pfix_add(mr_iid, merge_req.pipeline_id, attempt, 'no_jobs') + dbs.commit() + return False + + # Run agent to fix the failures + success, conversation_log = agent.fix_pipeline( + mr_iid, + merge_req.source_branch, + failed_jobs, + remote, + target, + mr_description=merge_req.description, + attempt=attempt, + ) + + status = 'success' if success else 'failure' + dbs.pfix_add(mr_iid, merge_req.pipeline_id, attempt, status) + dbs.commit() + + if success: + # Push the fix branch to the original MR branch + branch = merge_req.source_branch + gitlab_api.push_branch(remote, branch, force=True, + skip_ci=False) + + # Update MR description with fix log + old_desc = merge_req.description + job_names = ', '.join(j.name for j in failed_jobs) + new_desc = (f"{old_desc}\n\n### Pipeline fix (attempt {attempt})" + f"\n\n**Failed jobs:** {job_names}\n\n" + f"**Response:**\n{conversation_log}") + gitlab_api.update_mr_desc(remote, mr_iid, new_desc) + + # Post a comment summarising the fix + gitlab_api.reply_to_mr( + remote, mr_iid, + f'Pipeline fix (attempt {attempt}): ' + f'fixed failed job(s) {job_names}.\n\n' + f'{conversation_log[:2000]}') + + # Update .pickman-history + update_history_pipeline_fix(merge_req.source_branch, failed_jobs, + conversation_log, attempt) + + tout.info(f'MR 
!{mr_iid}: pipeline fix pushed (attempt {attempt})') + else: + gitlab_api.reply_to_mr( + remote, mr_iid, + f'Pipeline fix attempt {attempt} failed. ' + f'Agent output:\n\n{conversation_log[:1000]}') + tout.error(f'MR !{mr_iid}: pipeline fix failed ' + f'(attempt {attempt})') + + return True + + +def process_pipeline_failures(remote, mrs, dbs, target, max_retries): + """Process pipeline failures on open MRs + + Checks each MR for failed pipelines and uses Claude agent to diagnose + and fix them. Tracks attempts in the database to avoid reprocessing. + + Args: + remote (str): Remote name + mrs (list): List of active (non-skipped) PickmanMr tuples + dbs (Database): Database instance for tracking fix attempts + target (str): Target branch + max_retries (int): Maximum fix attempts per MR + + Returns: + int: Number of MRs with pipeline fixes attempted + """ + # Save current branch to restore later + original_branch = run_git(['rev-parse', '--abbrev-ref', 'HEAD']) + + # Fetch to get latest remote state + tout.info(f'Fetching {remote}...') + run_git(['fetch', remote]) + + processed = 0 + for merge_req in mrs: + mr_iid = merge_req.iid + + # Skip if pipeline is not failed or has no pipeline + if merge_req.pipeline_status != 'failed': + continue + if merge_req.pipeline_id is None: + continue + + # Skip if this pipeline was already handled + if dbs.pfix_has(mr_iid, merge_req.pipeline_id): + continue + + rebased = _rebase_mr_branch(remote, merge_req, dbs, target) + if rebased is not None: + if rebased: + processed += 1 + continue + + attempt = dbs.pfix_count(mr_iid) + 1 + + # Check retry limit + if attempt > max_retries: + tout.info(f'MR !{mr_iid}: reached fix retry limit ' + f'({max_retries}), skipping') + gitlab_api.reply_to_mr( + remote, mr_iid, + f'Pipeline fix: reached retry limit ({max_retries} ' + f'attempts). 
Manual intervention required.') + dbs.pfix_add(mr_iid, merge_req.pipeline_id, attempt, 'skipped') + dbs.commit() + continue + + tout.info('') + tout.info(f'MR !{mr_iid}: pipeline {merge_req.pipeline_id} failed, ' + f'attempting fix (attempt {attempt}/{max_retries})...') + + if _attempt_pipeline_fix(remote, merge_req, dbs, target, attempt): + processed += 1 + + # Restore original branch + if processed: + tout.info(f'Returning to {original_branch}') + run_git(['checkout', original_branch]) + + return processed + + +def update_history_pipeline_fix(branch_name, failed_jobs, conversation_log, + attempt): + """Append pipeline fix handling to .pickman-history + + Args: + branch_name (str): Branch name for the MR + failed_jobs (list): List of FailedJob tuples that were fixed + conversation_log (str): Agent conversation log + attempt (int): Fix attempt number + """ + job_summary = '\n'.join( + f'- {j.name} ({j.stage})' + for j in failed_jobs + ) + + entry = f'''### Pipeline fix: {date.today()} (attempt {attempt}) + +Branch: {branch_name} + +Failed jobs: +{job_summary} + +### Conversation log +{conversation_log} + +--- + +''' + + # Append to history file + existing = '' + if os.path.exists(HISTORY_FILE): + with open(HISTORY_FILE, 'r', encoding='utf-8') as fhandle: + existing = fhandle.read() + + with open(HISTORY_FILE, 'w', encoding='utf-8') as fhandle: + fhandle.write(existing + entry) + + # Commit the history file + run_git(['add', '-f', HISTORY_FILE]) + run_git(['commit', '-m', + f'pickman: Record pipeline fix for {branch_name}']) + + def update_history(branch_name, comments, conversation_log): """Append review handling to .pickman-history @@ -2604,6 +2846,11 @@ def do_step(args, dbs): # in case they have an unskip request) process_mr_reviews(remote, mrs, dbs, args.target) + # Process pipeline failures on active MRs only + if active_mrs and args.fix_retries > 0: + process_pipeline_failures(remote, active_mrs, dbs, + args.target, args.fix_retries) + # Only block new MR 
creation if we've reached the max allowed open MRs max_mrs = args.max_mrs if len(active_mrs) >= max_mrs: diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index 67a7d004ca6..af26cbb4229 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -2068,7 +2068,7 @@ class TestStep(unittest.TestCase): return_value=[mock_mr]): args = argparse.Namespace(cmd='step', source='us/next', remote='ci', target='master', - max_mrs=1) + max_mrs=1, fix_retries=3) with terminal.capture(): ret = control.do_step(args, None) @@ -2118,7 +2118,7 @@ class TestStep(unittest.TestCase): return_value=0) as mock_apply: args = argparse.Namespace(cmd='step', source='us/next', remote='ci', target='master', - max_mrs=5) + max_mrs=5, fix_retries=3) with terminal.capture(): ret = control.do_step(args, None) @@ -2146,7 +2146,7 @@ class TestStep(unittest.TestCase): with mock.patch.object(control, 'do_apply') as mock_apply: args = argparse.Namespace(cmd='step', source='us/next', remote='ci', target='master', - max_mrs=3) + max_mrs=3, fix_retries=3) with terminal.capture(): ret = control.do_step(args, None) @@ -5636,5 +5636,535 @@ class TestDoPick(unittest.TestCase): dbs.close() +class TestPickmanMrPipelineFields(unittest.TestCase): + """Tests for PickmanMr pipeline fields.""" + + def test_defaults_none(self): + """Test that pipeline fields default to None""" + pmr = gitlab.PickmanMr( + iid=1, + title='[pickman] Test', + web_url='https://example.com/mr/1', + source_branch='cherry-test', + description='Test', + ) + self.assertIsNone(pmr.pipeline_status) + self.assertIsNone(pmr.pipeline_id) + + def test_with_pipeline(self): + """Test creating PickmanMr with pipeline fields""" + pmr = gitlab.PickmanMr( + iid=1, + title='[pickman] Test', + web_url='https://example.com/mr/1', + source_branch='cherry-test', + description='Test', + pipeline_status='failed', + pipeline_id=42, + ) + self.assertEqual(pmr.pipeline_status, 'failed') + self.assertEqual(pmr.pipeline_id, 42) + + +class 
TestGetFailedJobs(unittest.TestCase): + """Tests for get_failed_jobs function.""" + + def _make_mock_job(self, job_id, name, stage, web_url, trace_bytes): + """Helper to create a mock job object""" + job = mock.MagicMock() + job.id = job_id + job.name = name + job.stage = stage + job.web_url = web_url + return job + + @mock.patch.object(gitlab, 'get_remote_url', + return_value=TEST_SSH_URL) + @mock.patch.object(gitlab, 'get_token', return_value='test-token') + @mock.patch.object(gitlab, 'AVAILABLE', True) + def test_success(self, _mock_token, _mock_url): + """Test successful retrieval of failed jobs""" + mock_job = self._make_mock_job( + 1, 'build:sandbox', 'build', 'https://gitlab.com/job/1', + b'line1\nline2\nerror: build failed\n') + + mock_full_job = mock.MagicMock() + mock_full_job.trace.return_value = b'line1\nline2\nerror: build failed\n' + + mock_pipeline = mock.MagicMock() + mock_pipeline.jobs.list.return_value = [mock_job] + + mock_project = mock.MagicMock() + mock_project.pipelines.get.return_value = mock_pipeline + mock_project.jobs.get.return_value = mock_full_job + + mock_glab = mock.MagicMock() + mock_glab.projects.get.return_value = mock_project + + with mock.patch('gitlab.Gitlab', return_value=mock_glab): + with terminal.capture(): + result = gitlab.get_failed_jobs('ci', 100) + + self.assertIsNotNone(result) + self.assertEqual(len(result), 1) + self.assertEqual(result[0].name, 'build:sandbox') + self.assertEqual(result[0].stage, 'build') + self.assertIn('error: build failed', result[0].log_tail) + + @mock.patch.object(gitlab, 'get_remote_url', + return_value=TEST_SSH_URL) + @mock.patch.object(gitlab, 'get_token', return_value='test-token') + @mock.patch.object(gitlab, 'AVAILABLE', True) + def test_empty(self, _mock_token, _mock_url): + """Test when no failed jobs exist""" + mock_pipeline = mock.MagicMock() + mock_pipeline.jobs.list.return_value = [] + + mock_project = mock.MagicMock() + mock_project.pipelines.get.return_value = mock_pipeline + + 
mock_glab = mock.MagicMock() + mock_glab.projects.get.return_value = mock_project + + with mock.patch('gitlab.Gitlab', return_value=mock_glab): + with terminal.capture(): + result = gitlab.get_failed_jobs('ci', 100) + + self.assertIsNotNone(result) + self.assertEqual(len(result), 0) + + @mock.patch.object(gitlab, 'get_remote_url', + return_value=TEST_SSH_URL) + @mock.patch.object(gitlab, 'get_token', return_value='test-token') + @mock.patch.object(gitlab, 'AVAILABLE', True) + def test_log_truncation(self, _mock_token, _mock_url): + """Test that log output is truncated to max_log_lines""" + # Create a trace with 500 lines + trace_lines = [f'line {i}' for i in range(500)] + trace_bytes = '\n'.join(trace_lines).encode() + + mock_job = self._make_mock_job( + 1, 'test:sandbox', 'test', 'https://gitlab.com/job/1', + trace_bytes) + + mock_full_job = mock.MagicMock() + mock_full_job.trace.return_value = trace_bytes + + mock_pipeline = mock.MagicMock() + mock_pipeline.jobs.list.return_value = [mock_job] + + mock_project = mock.MagicMock() + mock_project.pipelines.get.return_value = mock_pipeline + mock_project.jobs.get.return_value = mock_full_job + + mock_glab = mock.MagicMock() + mock_glab.projects.get.return_value = mock_project + + with mock.patch('gitlab.Gitlab', return_value=mock_glab): + with terminal.capture(): + result = gitlab.get_failed_jobs('ci', 100, max_log_lines=50) + + self.assertEqual(len(result), 1) + # Should only have last 50 lines + log_lines = result[0].log_tail.splitlines() + self.assertEqual(len(log_lines), 50) + self.assertIn('line 499', result[0].log_tail) + + +class TestBuildPipelineFixPrompt(unittest.TestCase): + """Tests for build_pipeline_fix_prompt function.""" + + def test_single_job(self): + """Test prompt with a single failed job""" + failed_jobs = [ + gitlab.FailedJob( + id=1, name='build:sandbox', stage='build', + web_url='https://gitlab.com/job/1', + log_tail='error: undefined reference'), + ] + prompt, task_desc = 
agent.build_pipeline_fix_prompt( + 42, 'cherry-abc123', failed_jobs, 'ci', 'master', + 'Test MR desc', 1) + + self.assertIn('!42', prompt) + self.assertIn('cherry-abc123', prompt) + self.assertIn('build:sandbox', prompt) + self.assertIn('error: undefined reference', prompt) + self.assertIn('attempt 1', prompt) + self.assertIn('cherry-abc123-fix1', prompt) + self.assertIn('1 failed', task_desc) + + def test_multiple_jobs(self): + """Test prompt with multiple failed jobs""" + failed_jobs = [ + gitlab.FailedJob( + id=1, name='build:sandbox', stage='build', + web_url='https://gitlab.com/job/1', + log_tail='build error'), + gitlab.FailedJob( + id=2, name='test:dm', stage='test', + web_url='https://gitlab.com/job/2', + log_tail='test failure'), + ] + prompt, task_desc = agent.build_pipeline_fix_prompt( + 42, 'cherry-abc123', failed_jobs, 'ci', 'master', '', 1) + + self.assertIn('build:sandbox', prompt) + self.assertIn('test:dm', prompt) + self.assertIn('build error', prompt) + self.assertIn('test failure', prompt) + self.assertIn('2 failed', task_desc) + + def test_attempt_number(self): + """Test that attempt number is reflected in prompt""" + failed_jobs = [ + gitlab.FailedJob( + id=1, name='build', stage='build', + web_url='https://gitlab.com/job/1', + log_tail='error'), + ] + prompt, task_desc = agent.build_pipeline_fix_prompt( + 42, 'cherry-abc123', failed_jobs, 'ci', 'master', '', 3) + + self.assertIn('attempt 3', prompt) + self.assertIn('cherry-abc123-fix3', prompt) + self.assertIn('attempt 3', task_desc) + + def test_uses_um_build(self): + """Test that prompt uses 'um build sandbox' for sandbox""" + failed_jobs = [ + gitlab.FailedJob( + id=1, name='build:sandbox', stage='build', + web_url='https://gitlab.com/job/1', + log_tail='error'), + ] + prompt, _ = agent.build_pipeline_fix_prompt( + 42, 'cherry-abc123', failed_jobs, 'ci', 'master', '', 1) + + self.assertIn('um build sandbox', prompt) + + def test_extracts_board_names(self): + """Test that board names are 
extracted from job names""" + failed_jobs = [ + gitlab.FailedJob( + id=1, name='build:imx8mm_venice', stage='build', + web_url='https://gitlab.com/job/1', + log_tail='error'), + gitlab.FailedJob( + id=2, name='build:rpi_4', stage='build', + web_url='https://gitlab.com/job/2', + log_tail='error'), + ] + prompt, _ = agent.build_pipeline_fix_prompt( + 42, 'cherry-abc123', failed_jobs, 'ci', 'master', '', 1) + + # Should include both boards plus sandbox in the buildman command + self.assertIn('buildman', prompt) + self.assertIn('imx8mm_venice', prompt) + self.assertIn('rpi_4', prompt) + self.assertIn('sandbox', prompt) + + def test_buildman_for_multiple_boards(self): + """Test that buildman is used for building multiple boards""" + failed_jobs = [ + gitlab.FailedJob( + id=1, name='build:coral', stage='build', + web_url='https://gitlab.com/job/1', + log_tail='error'), + ] + prompt, _ = agent.build_pipeline_fix_prompt( + 42, 'cherry-abc123', failed_jobs, 'ci', 'master', '', 1) + + self.assertIn('buildman -o /tmp/pickman', prompt) + self.assertIn('coral', prompt) + + +class TestProcessPipelineFailures(unittest.TestCase): + """Tests for process_pipeline_failures function.""" + + def setUp(self): + """Set up test fixtures.""" + fd, self.db_path = tempfile.mkstemp(suffix='.db') + os.close(fd) + os.unlink(self.db_path) + + def tearDown(self): + """Clean up test fixtures.""" + if os.path.exists(self.db_path): + os.unlink(self.db_path) + database.Database.instances.clear() + + def _make_mr(self, iid=1, pipeline_status='failed', pipeline_id=100, + needs_rebase=False, has_conflicts=False): + """Helper to create a PickmanMr with pipeline fields""" + return gitlab.PickmanMr( + iid=iid, + title=f'[pickman] Test MR {iid}', + web_url=f'https://gitlab.com/mr/{iid}', + source_branch=f'cherry-test-{iid}', + description='Test description', + has_conflicts=has_conflicts, + needs_rebase=needs_rebase, + pipeline_status=pipeline_status, + pipeline_id=pipeline_id, + ) + + def 
test_skips_running(self): + """Test that running pipelines are skipped""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + mrs = [self._make_mr(pipeline_status='running')] + with mock.patch.object(control, 'run_git'): + result = control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + self.assertEqual(result, 0) + dbs.close() + + def test_skips_success(self): + """Test that successful pipelines are skipped""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + mrs = [self._make_mr(pipeline_status='success')] + with mock.patch.object(control, 'run_git'): + result = control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + self.assertEqual(result, 0) + dbs.close() + + def test_skips_already_processed(self): + """Test that already-processed pipelines are skipped""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + # Pre-record this pipeline + dbs.pfix_add(1, 100, 1, 'success') + dbs.commit() + + mrs = [self._make_mr()] + with mock.patch.object(control, 'run_git'): + result = control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + self.assertEqual(result, 0) + dbs.close() + + def test_respects_retry_limit(self): + """Test that retry limit is respected""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + # Pre-record 3 attempts with different pipeline IDs + dbs.pfix_add(1, 10, 1, 'failure') + dbs.pfix_add(1, 20, 2, 'failure') + dbs.pfix_add(1, 30, 3, 'failure') + dbs.commit() + + mrs = [self._make_mr(pipeline_id=40)] + with mock.patch.object(control, 'run_git'): + with mock.patch.object(gitlab, 'reply_to_mr', + return_value=True): + result = control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + # Should have been processed (comment posted) but not fixed + self.assertEqual(result, 0) + dbs.close() + + def test_posts_comment_at_limit(self): + """Test that a comment is posted when 
retry limit is reached""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + # Pre-record 3 attempts + dbs.pfix_add(1, 10, 1, 'failure') + dbs.pfix_add(1, 20, 2, 'failure') + dbs.pfix_add(1, 30, 3, 'failure') + dbs.commit() + + mrs = [self._make_mr(pipeline_id=40)] + with mock.patch.object(control, 'run_git'): + with mock.patch.object(gitlab, 'reply_to_mr', + return_value=True) as mock_reply: + control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + mock_reply.assert_called_once() + call_args = mock_reply.call_args + self.assertIn('retry limit', call_args[0][2]) + dbs.close() + + def test_processes_failed(self): + """Test processing a failed pipeline""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + failed_jobs = [ + gitlab.FailedJob(id=1, name='build', stage='build', + web_url='https://gitlab.com/job/1', + log_tail='error'), + ] + mrs = [self._make_mr()] + + with mock.patch.object(control, 'run_git'): + with mock.patch.object(gitlab, 'get_failed_jobs', + return_value=failed_jobs): + with mock.patch.object(agent, 'fix_pipeline', + return_value=(True, 'Fixed it')): + with mock.patch.object( + gitlab, 'push_branch', + return_value=True) as mock_push: + with mock.patch.object(gitlab, 'update_mr_desc', + return_value=True): + with mock.patch.object( + gitlab, 'reply_to_mr', + return_value=True) as mock_reply: + with mock.patch.object( + control, + 'update_history_pipeline_fix'): + result = \ + control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + self.assertEqual(result, 1) + # Should be recorded in database + self.assertTrue(dbs.pfix_has(1, 100)) + # Should push the branch + mock_push.assert_called_once_with( + 'ci', 'cherry-test-1', force=True, skip_ci=False) + # Should post a comment on the MR + mock_reply.assert_called_once() + reply_msg = mock_reply.call_args[0][2] + self.assertIn('Fixed it', reply_msg) + self.assertIn('build', reply_msg) + dbs.close() + + def 
test_skips_skipped_mr(self): + """Test that MRs without pipeline_id are skipped""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + mrs = [self._make_mr(pipeline_id=None)] + with mock.patch.object(control, 'run_git'): + result = control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + self.assertEqual(result, 0) + dbs.close() + + def test_rebases_before_fix(self): + """Test that a branch needing rebase is rebased instead of fixed""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + mrs = [self._make_mr(needs_rebase=True)] + with mock.patch.object(control, 'run_git'): + with mock.patch.object( + gitlab, 'push_branch', + return_value=True) as mock_push: + with mock.patch.object(agent, 'fix_pipeline') as mock_fix: + result = control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + self.assertEqual(result, 1) + # Should push the rebased branch, not call fix_pipeline + mock_push.assert_called_once_with( + 'ci', 'cherry-test-1', force=True, skip_ci=False) + mock_fix.assert_not_called() + # Should be recorded as 'rebased' in database + self.assertTrue(dbs.pfix_has(1, 100)) + dbs.close() + + def test_rebase_with_conflicts_skips(self): + """Test that a failed rebase skips the pipeline fix""" + with terminal.capture(): + dbs = database.Database(self.db_path) + dbs.start() + + mrs = [self._make_mr(has_conflicts=True)] + + def mock_run_git_fn(args): + if args[0] == 'rebase': + raise command.CommandExc('conflict', None) + return '' + + with mock.patch.object(control, 'run_git', + side_effect=mock_run_git_fn): + with mock.patch.object(agent, 'fix_pipeline') as mock_fix: + result = control.process_pipeline_failures( + 'ci', mrs, dbs, 'master', 3) + + self.assertEqual(result, 0) + mock_fix.assert_not_called() + dbs.close() + + def test_disabled_with_zero(self): + """Test that fix_retries=0 is handled in do_step (not called)""" + mock_mr = gitlab.PickmanMr( + iid=123, + title='[pickman] 
Test MR', + web_url='https://gitlab.com/mr/123', + source_branch='cherry-test', + description='Test', + pipeline_status='failed', + pipeline_id=100, + ) + with mock.patch.object(control, 'run_git'): + with mock.patch.object(gitlab, 'get_merged_pickman_mrs', + return_value=[]): + with mock.patch.object(gitlab, 'get_open_pickman_mrs', + return_value=[mock_mr]): + with mock.patch.object( + control, 'process_pipeline_failures') as mock_ppf: + args = argparse.Namespace( + cmd='step', source='us/next', + remote='ci', target='master', + max_mrs=1, fix_retries=0) + with terminal.capture(): + control.do_step(args, None) + + mock_ppf.assert_not_called() + + +class TestStepFixRetries(unittest.TestCase): + """Tests for --fix-retries argument parsing.""" + + def test_default(self): + """Test default fix-retries value for step""" + args = pickman.parse_args(['step', 'us/next']) + self.assertEqual(args.fix_retries, 3) + + def test_custom(self): + """Test custom fix-retries value for step""" + args = pickman.parse_args(['step', 'us/next', '--fix-retries', '5']) + self.assertEqual(args.fix_retries, 5) + + def test_zero_disables(self): + """Test that fix-retries=0 is accepted""" + args = pickman.parse_args(['step', 'us/next', '--fix-retries', '0']) + self.assertEqual(args.fix_retries, 0) + + def test_poll_default(self): + """Test default fix-retries value for poll""" + args = pickman.parse_args(['poll', 'us/next']) + self.assertEqual(args.fix_retries, 3) + + def test_poll_custom(self): + """Test custom fix-retries value for poll""" + args = pickman.parse_args(['poll', 'us/next', '--fix-retries', '1']) + self.assertEqual(args.fix_retries, 1) + + if __name__ == '__main__': unittest.main() -- 2.43.0
From: Simon Glass <simon.glass@canonical.com> When do_step() finds the number of open MRs is at the maximum, it returns immediately without processing subtree updates or advancing past fully-processed merges. These actions don't create MRs (subtree updates push to the target branch directly), so they should happen regardless. Currently the user must wait for the next poll interval and for an MR slot to open before subtree updates are applied. Move the _prepare_get_commits() call to before the max_mrs check in do_step(), so subtree updates and source-position advances happen unconditionally. Add an optional 'info' parameter to do_apply() and prepare_apply() so the already-fetched result can be passed through, avoiding a redundant call when below the max MR limit. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/control.py | 27 +++++++++++---- tools/pickman/ftest.py | 72 ++++++++++++++++++++++++++-------------- 2 files changed, 67 insertions(+), 32 deletions(-) diff --git a/tools/pickman/control.py b/tools/pickman/control.py index f4d4a43c292..a42927f991d 100644 --- a/tools/pickman/control.py +++ b/tools/pickman/control.py @@ -1663,7 +1663,7 @@ def _prepare_get_commits(dbs, source, args): return info, None -def prepare_apply(dbs, source, branch, args=None): +def prepare_apply(dbs, source, branch, args=None, info=None): """Prepare for applying commits from a source branch Get the next commits, set up the branch name and prints info about @@ -1676,15 +1676,18 @@ def prepare_apply(dbs, source, branch, args=None): branch (str): Branch name to use, or None to auto-generate args (Namespace): Parsed arguments with 'push', 'remote', 'target' (needed for subtree updates) + info (NextCommitsInfo): Pre-fetched commit info from + _prepare_get_commits(), or None to fetch it here Returns: tuple: (ApplyInfo, return_code) where ApplyInfo is set if there are commits to apply, or None with return_code 
indicating the result (0 for no commits, 1 for error) """ - info, ret = _prepare_get_commits(dbs, source, args) - if ret is not None: - return None, ret + if info is None: + info, ret = _prepare_get_commits(dbs, source, args) + if ret is not None: + return None, ret commits = info.commits @@ -1889,18 +1892,20 @@ def execute_apply(dbs, source, commits, branch_name, args, advance_to=None): # return ret, success, conv_log -def do_apply(args, dbs): +def do_apply(args, dbs, info=None): """Apply the next set of commits using Claude agent Args: args (Namespace): Parsed arguments with 'source' and 'branch' attributes dbs (Database): Database instance + info (NextCommitsInfo): Pre-fetched commit info from + _prepare_get_commits(), or None to fetch during prepare Returns: int: 0 on success, 1 on failure """ source = args.source - info, ret = prepare_apply(dbs, source, args.branch, args) + info, ret = prepare_apply(dbs, source, args.branch, args, info=info) if not info: return ret @@ -2851,6 +2856,14 @@ def do_step(args, dbs): process_pipeline_failures(remote, active_mrs, dbs, args.target, args.fix_retries) + # Process subtree updates and advance past fully-processed merges + # regardless of MR count, since these don't create MRs + info, ret = _prepare_get_commits(dbs, source, args) + if ret is not None: + if ret: + return ret + return 0 + # Only block new MR creation if we've reached the max allowed open MRs max_mrs = args.max_mrs if len(active_mrs) >= max_mrs: @@ -2869,7 +2882,7 @@ def do_step(args, dbs): tout.info('No pending pickman MRs, creating new one...') args.push = True args.branch = None # Let do_apply generate branch name - return do_apply(args, dbs) + return do_apply(args, dbs, info=info) def do_poll(args, dbs): diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index af26cbb4229..ced2c79ac87 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -2061,16 +2061,21 @@ class TestStep(unittest.TestCase): source_branch='cherry-test', 
description='Test', ) + mock_info = control.NextCommitsInfo( + commits=['fake'], merge_found=True, advance_to='abc123') with mock.patch.object(control, 'run_git'): with mock.patch.object(gitlab, 'get_merged_pickman_mrs', return_value=[]): with mock.patch.object(gitlab, 'get_open_pickman_mrs', return_value=[mock_mr]): - args = argparse.Namespace(cmd='step', source='us/next', - remote='ci', target='master', - max_mrs=1, fix_retries=3) - with terminal.capture(): - ret = control.do_step(args, None) + with mock.patch.object( + control, '_prepare_get_commits', + return_value=(mock_info, None)): + args = argparse.Namespace( + cmd='step', source='us/next', remote='ci', + target='master', max_mrs=1, fix_retries=3) + with terminal.capture(): + ret = control.do_step(args, None) self.assertEqual(ret, 0) @@ -2109,18 +2114,23 @@ class TestStep(unittest.TestCase): source_branch='cherry-test', description='Test', ) + mock_info = control.NextCommitsInfo( + commits=['fake'], merge_found=True, advance_to='abc123') with mock.patch.object(control, 'run_git'): with mock.patch.object(gitlab, 'get_merged_pickman_mrs', return_value=[]): with mock.patch.object(gitlab, 'get_open_pickman_mrs', return_value=[mock_mr]): - with mock.patch.object(control, 'do_apply', - return_value=0) as mock_apply: - args = argparse.Namespace(cmd='step', source='us/next', - remote='ci', target='master', - max_mrs=5, fix_retries=3) - with terminal.capture(): - ret = control.do_step(args, None) + with mock.patch.object( + control, '_prepare_get_commits', + return_value=(mock_info, None)): + with mock.patch.object(control, 'do_apply', + return_value=0) as mock_apply: + args = argparse.Namespace( + cmd='step', source='us/next', remote='ci', + target='master', max_mrs=5, fix_retries=3) + with terminal.capture(): + ret = control.do_step(args, None) # With 1 open MR and max_mrs=5, it should try to create a new one self.assertEqual(ret, 0) @@ -2138,17 +2148,23 @@ class TestStep(unittest.TestCase): ) for i in range(3) ] 
+ mock_info = control.NextCommitsInfo( + commits=['fake'], merge_found=True, advance_to='abc123') with mock.patch.object(control, 'run_git'): with mock.patch.object(gitlab, 'get_merged_pickman_mrs', return_value=[]): with mock.patch.object(gitlab, 'get_open_pickman_mrs', return_value=mock_mrs): - with mock.patch.object(control, 'do_apply') as mock_apply: - args = argparse.Namespace(cmd='step', source='us/next', - remote='ci', target='master', - max_mrs=3, fix_retries=3) - with terminal.capture(): - ret = control.do_step(args, None) + with mock.patch.object( + control, '_prepare_get_commits', + return_value=(mock_info, None)): + with mock.patch.object(control, 'do_apply') \ + as mock_apply: + args = argparse.Namespace( + cmd='step', source='us/next', remote='ci', + target='master', max_mrs=3, fix_retries=3) + with terminal.capture(): + ret = control.do_step(args, None) # With 3 open MRs and max_mrs=3, should not create new MR self.assertEqual(ret, 0) @@ -6120,19 +6136,25 @@ class TestProcessPipelineFailures(unittest.TestCase): pipeline_status='failed', pipeline_id=100, ) + mock_info = control.NextCommitsInfo( + commits=['fake'], merge_found=True, advance_to='abc123') with mock.patch.object(control, 'run_git'): with mock.patch.object(gitlab, 'get_merged_pickman_mrs', return_value=[]): with mock.patch.object(gitlab, 'get_open_pickman_mrs', return_value=[mock_mr]): with mock.patch.object( - control, 'process_pipeline_failures') as mock_ppf: - args = argparse.Namespace( - cmd='step', source='us/next', - remote='ci', target='master', - max_mrs=1, fix_retries=0) - with terminal.capture(): - control.do_step(args, None) + control, '_prepare_get_commits', + return_value=(mock_info, None)): + with mock.patch.object( + control, + 'process_pipeline_failures') as mock_ppf: + args = argparse.Namespace( + cmd='step', source='us/next', + remote='ci', target='master', + max_mrs=1, fix_retries=0) + with terminal.capture(): + control.do_step(args, None) mock_ppf.assert_not_called() -- 
2.43.0
From: Simon Glass <simon.glass@canonical.com> CI job traces can contain embedded null bytes, which cause a ValueError ("embedded null byte") when the prompt string is passed to the Claude Agent SDK subprocess. Strip null bytes from the trace after decoding. Co-developed-by: Claude Opus 4.6 <noreply@anthropic.com> Signed-off-by: Simon Glass <simon.glass@canonical.com> --- tools/pickman/ftest.py | 35 +++++++++++++++++++++++++++++++++++ tools/pickman/gitlab_api.py | 1 + 2 files changed, 36 insertions(+) diff --git a/tools/pickman/ftest.py b/tools/pickman/ftest.py index ced2c79ac87..5a4d0c433dc 100644 --- a/tools/pickman/ftest.py +++ b/tools/pickman/ftest.py @@ -5787,6 +5787,41 @@ class TestGetFailedJobs(unittest.TestCase): self.assertIn('line 499', result[0].log_tail) + @mock.patch.object(gitlab, 'get_remote_url', + return_value=TEST_SSH_URL) + @mock.patch.object(gitlab, 'get_token', return_value='test-token') + @mock.patch.object(gitlab, 'AVAILABLE', True) + def test_null_bytes_stripped(self, _mock_token, _mock_url): + """Test that null bytes in job logs are stripped""" + trace_bytes = b'before\x00after\nline2\x00end\n' + + mock_job = self._make_mock_job( + 1, 'build:sandbox', 'build', 'https://gitlab.com/job/1', + trace_bytes) + + mock_full_job = mock.MagicMock() + mock_full_job.trace.return_value = trace_bytes + + mock_pipeline = mock.MagicMock() + mock_pipeline.jobs.list.return_value = [mock_job] + + mock_project = mock.MagicMock() + mock_project.pipelines.get.return_value = mock_pipeline + mock_project.jobs.get.return_value = mock_full_job + + mock_glab = mock.MagicMock() + mock_glab.projects.get.return_value = mock_project + + with mock.patch('gitlab.Gitlab', return_value=mock_glab): + with terminal.capture(): + result = gitlab.get_failed_jobs('ci', 100) + + self.assertEqual(len(result), 1) + self.assertNotIn('\0', result[0].log_tail) + self.assertIn('beforeafter', result[0].log_tail) + self.assertIn('line2end', result[0].log_tail) + + class 
TestBuildPipelineFixPrompt(unittest.TestCase): """Tests for build_pipeline_fix_prompt function.""" diff --git a/tools/pickman/gitlab_api.py b/tools/pickman/gitlab_api.py index d9635d392ee..81918c80c3c 100644 --- a/tools/pickman/gitlab_api.py +++ b/tools/pickman/gitlab_api.py @@ -478,6 +478,7 @@ def get_failed_jobs(remote, pipeline_id, max_log_lines=200): full_job = project.jobs.get(job.id) try: trace = full_job.trace().decode('utf-8', errors='replace') + trace = trace.replace('\0', '') lines = trace.splitlines() log_tail = '\n'.join(lines[-max_log_lines:]) except (AttributeError, gitlab.exceptions.GitlabError): -- 2.43.0
participants (1)
-
Simon Glass