#!/usr/bin/env python3
'''
DEPRECATED: Use team_updates_v2.py instead.
'''
import os
import json
import datetime
import asyncio
import aiohttp
import argparse
from datetime import datetime, timedelta, timezone
import pytz
import sys
import time
import random
import logging
from dotenv import load_dotenv
# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Load .env file before checking environment variables
load_dotenv()
# Configuration
GITHUB_TOKEN = os.environ.get('MY_GITHUB_TOKEN')
if not GITHUB_TOKEN:
    logger.error("GitHub token not found. Please set the MY_GITHUB_TOKEN environment variable.")
    exit(1)
# Default values
DEFAULT_ORGANIZATION = "d3servelabs"
DEFAULT_REPOS = [] # Empty means all repos
IGNORED_USERS = ["graphite-app[bot]"] # Default users to ignore
# Command line arguments
parser = argparse.ArgumentParser(description='Generate team updates report')
parser.add_argument('--date-range', type=str, help='Date range in format "YYYY-MM-DD to YYYY-MM-DD" or "YYYY-MM-DD HH:MM to YYYY-MM-DD HH:MM"')
parser.add_argument('--output-path', type=str, help='Path to save the output markdown file')
parser.add_argument('--debug', action='store_true', help='Enable debug logging')
parser.add_argument('--organization', type=str, default=DEFAULT_ORGANIZATION, help=f'GitHub organization name (default: {DEFAULT_ORGANIZATION})')
parser.add_argument('--repos', type=str, default='', help='Comma-separated list of repository names to include (default: all repos)')
parser.add_argument('--ignore-users', type=str, default='', help='Comma-separated list of GitHub usernames to ignore (default: graphite-app)')
args = parser.parse_args()
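# Illustrative invocation (a sketch; the repo names and dates below are placeholders,
# not taken from this codebase):
#   python team_updates.py --organization d3servelabs --repos repo-a,repo-b \
#       --date-range "2024-05-01 to 2024-05-02" --output-path team_updates_report.md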
# Set organization and repos from command line arguments
ORGANIZATION = args.organization
REPOS = [repo.strip() for repo in args.repos.split(',')] if args.repos else []
# Set ignored users from command line arguments
if args.ignore_users:
    IGNORED_USERS = [user.strip() for user in args.ignore_users.split(',')]
    logger.info(f"Ignoring updates from users: {', '.join(IGNORED_USERS)}")
HEADERS = {
    "Accept": "application/vnd.github+json",
    "Authorization": f"Bearer {GITHUB_TOKEN}",
    "X-GitHub-Api-Version": "2022-11-28"
}
# Rate limiting configuration
MAX_RETRIES = 5
INITIAL_BACKOFF = 1 # seconds
MAX_BACKOFF = 60 # seconds
RATE_LIMIT_THRESHOLD = 100 # Minimum remaining requests before warning
RATE_LIMIT_CRITICAL = 50 # Minimum remaining requests before stopping
LLM_PLEASE_FILL_PROMPT = "<!-- LLM_PLEASE_FILL: Based on the full report below, " + \
    "please fill here with a TL;DR section which " + \
    "is a short summary of the most important things done by the whole team " + \
    "presented in keywords of features or components and their changes. " + \
    "The TL;DR should be a total of 1-2 lines. " + \
    "Each component should preserve the most important PR_NUMBER with links to the PR. Don't make up PR_NUMBER, use the actual PR_NUMBER. " + \
    "PR Link should look like this [#123](https://github.com/<org_name>/<repo_name>/pull/123) where 123 is the PR_NUMBER. " + \
    "No need to have \"PR\" in the links. Don't make things up, only use what's in the full report. -->\n\n"
LLM_REPLACE_BELOW_PROMPT = " <!-- LLM_PLEASE_REPLACE_BELOW: please replace the following list of PRs " + \
    "with a short summary of the 5 most important topics among these PRs as bullet points; each bullet point should be a short summary of a PR or of multiple PRs " + \
    "on the same topic, with PR and commit links, then the 6th bullet point " + \
    "should be starting with \"Also:\" and then a summary of the other activities of this teammate, preserving PR and commit " + \
    "links immediately after each individual phrase of features or components and their changes. " + \
    "PR Link should look like this [#123](https://github.com/<org_name>/<repo_name>/pull/123) where 123 is the PR_NUMBER. " + \
    "Commit Link should look like this [#abcd1234](https://github.com/<org_name>/<repo_name>/commit/abcd1234) where abcd1234 is the commit SHA. " + \
    "No need to have \"PR\" and \"Commit\" in the links. DO NOT MAKE THINGS UP, ONLY USE WHAT'S IN THE FOLLOWING REPORT-->\n"
LLM_REPLACE_ABOVE_PROMPT = " <!-- LLM_PLEASE_REPLACE_ABOVE -->\n\n"
class RateLimitError(Exception):
    """Custom exception for rate limit errors"""
    pass
async def check_rate_limits(session):
    """Check GitHub API rate limits and return remaining requests"""
    logger.debug("Checking GitHub API rate limits")
    async with session.get("https://api.github.com/rate_limit", headers=HEADERS) as resp:
        if resp.status == 200:
            limits = await resp.json()
            core = limits.get("resources", {}).get("core", {})
            remaining = core.get("remaining", 0)
            limit = core.get("limit", 0)
            reset_time = datetime.fromtimestamp(core.get("reset", 0), tz=timezone.utc)
            logger.info(f"GitHub API Rate Limit: {remaining}/{limit} remaining")
            logger.info(f"Rate limit resets at: {reset_time.isoformat()}")
            if remaining < RATE_LIMIT_THRESHOLD:
                logger.warning(f"GitHub API rate limit is getting low! ({remaining} remaining)")
            if remaining < RATE_LIMIT_CRITICAL:
                logger.error("GitHub API rate limit is too low to proceed!")
                raise RateLimitError(f"Rate limit too low: {remaining} remaining")
            return remaining
        else:
            logger.error(f"Error checking rate limits: {resp.status}")
            return None
async def make_request(session, url, method="GET", retry_count=0):
    """Make an HTTP request with exponential backoff and rate limit handling"""
    logger.debug(f"Making {method} request to {url}")
    try:
        async with session.request(method, url, headers=HEADERS) as resp:
            if resp.status == 403 and "rate limit exceeded" in (await resp.text()).lower():
                # Check rate limits
                remaining = await check_rate_limits(session)
                if remaining is not None and remaining < RATE_LIMIT_CRITICAL:
                    raise RateLimitError(f"Rate limit exceeded: {remaining} remaining")
                # Calculate backoff time with jitter
                backoff = min(INITIAL_BACKOFF * (2 ** retry_count), MAX_BACKOFF)
                jitter = random.uniform(0, backoff * 0.1)
                wait_time = backoff + jitter
                logger.warning(f"Rate limit hit. Waiting {wait_time:.2f} seconds before retry {retry_count + 1}/{MAX_RETRIES}")
                await asyncio.sleep(wait_time)
                if retry_count < MAX_RETRIES:
                    return await make_request(session, url, method, retry_count + 1)
                else:
                    raise RateLimitError("Max retries exceeded for rate limit")
            if resp.status == 200:
                return await resp.json()
            else:
                logger.error(f"Error in request to {url}: {resp.status}")
                return None
    except aiohttp.ClientError as e:
        logger.error(f"Network error: {e}")
        if retry_count < MAX_RETRIES:
            backoff = min(INITIAL_BACKOFF * (2 ** retry_count), MAX_BACKOFF)
            await asyncio.sleep(backoff)
            return await make_request(session, url, method, retry_count + 1)
        raise
# Helper functions
def get_pst_time():
    """Get current time in PST/PDT"""
    ptz = pytz.timezone('America/Los_Angeles')
    return datetime.now(ptz)
def get_default_time_range():
    """Get default time range: 6am PT yesterday to 6am PT today"""
    now = get_pst_time()
    today_6am = now.replace(hour=6, minute=0, second=0, microsecond=0)
    if now.hour < 6:
        # If current time is before 6am, use 6am two days ago to 6am yesterday
        yesterday_6am = today_6am - timedelta(days=1)
        day_before_6am = today_6am - timedelta(days=2)
        return day_before_6am, yesterday_6am
    else:
        # Otherwise, use 6am yesterday to 6am today
        yesterday_6am = today_6am - timedelta(days=1)
        return yesterday_6am, today_6am
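# Worked example of the default window (dates are hypothetical): a run at 09:00 PT on
# 2024-05-02 yields 2024-05-01 06:00 PT -> 2024-05-02 06:00 PT, while a run at 03:00 PT
# on 2024-05-02 shifts the window back a day to 2024-04-30 06:00 PT -> 2024-05-01 06:00 PT.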
def format_date_range(start_time, end_time):
    """Format date range for display"""
    if start_time.date() == end_time.date() - timedelta(days=1):
        return f"{start_time.strftime('%Y-%m-%d')} to {end_time.strftime('%Y-%m-%d')}"
    return f"{start_time.strftime('%Y-%m-%d %H:%M')} PT to {end_time.strftime('%Y-%m-%d %H:%M')} PT"
def is_in_time_range(date_str, start_time, end_time):
    """Check if the given date is within the time range"""
    date = datetime.fromisoformat(date_str.replace('Z', '+00:00'))
    return start_time <= date <= end_time
def should_ignore_user(username):
    """Check if a user should be ignored"""
    return username.lower() in [u.lower() for u in IGNORED_USERS]
async def fetch_repos(session):
    """Fetch all repositories in the organization"""
    logger.info("Fetching repositories from organization")
    all_repos = []
    page = 1
    while True:
        url = f"https://api.github.com/orgs/{ORGANIZATION}/repos?per_page=100&page={page}"
        repos = await make_request(session, url)
        if not repos:
            break
        all_repos.extend([repo["name"] for repo in repos])
        logger.debug(f"Fetched {len(repos)} repositories from page {page}")
        page += 1
    logger.info(f"Total repositories found: {len(all_repos)}")
    return all_repos
async def fetch_pull_requests(session, repo):
    """Fetch all pull requests for a repository in our time range"""
    logger.debug(f"Fetching pull requests for repository: {repo}")
    all_prs = []
    page = 1
    while True:
        url = f"https://api.github.com/repos/{ORGANIZATION}/{repo}/pulls?state=all&per_page=100&page={page}&sort=updated&direction=desc"
        prs = await make_request(session, url)
        if not prs:
            break
        all_prs.extend(prs)
        logger.debug(f"Fetched {len(prs)} PRs from {repo} page {page}")
        page += 1
        # Limit to first 3 pages to avoid excessive API calls
        if page > 3:
            break
    logger.debug(f"Total PRs found for {repo}: {len(all_prs)}")
    return all_prs
async def fetch_pr_commits(session, repo, pr_number):
    """Fetch all commits for a PR"""
    logger.debug(f"Fetching commits for PR #{pr_number} in {repo}")
    all_commits = []
    page = 1
    while True:
        url = f"https://api.github.com/repos/{ORGANIZATION}/{repo}/pulls/{pr_number}/commits?per_page=100&page={page}"
        commits = await make_request(session, url)
        if not commits:
            break
        all_commits.extend(commits)
        logger.debug(f"Fetched {len(commits)} commits from PR #{pr_number} in {repo} page {page}")
        page += 1
        # Limit to 2 pages to avoid excessive API calls
        if page > 2:
            break
    logger.debug(f"Total commits found for PR #{pr_number} in {repo}: {len(all_commits)}")
    return all_commits
async def fetch_branch_commits(session, repo, branch="main", since=None):
    """Fetch all commits for a branch since a specific date"""
    logger.debug(f"Fetching commits for branch {branch} in {repo}")
    all_commits = []
    page = 1
    # Build the URL with optional since parameter
    url = f"https://api.github.com/repos/{ORGANIZATION}/{repo}/commits?sha={branch}&per_page=100"
    if since:
        url += f"&since={since.isoformat()}"
    while True:
        commits = await make_request(session, f"{url}&page={page}")
        if not commits:
            break
        all_commits.extend(commits)
        logger.debug(f"Fetched {len(commits)} commits from {repo} branch {branch} page {page}")
        page += 1
        # Limit to 3 pages to avoid excessive API calls
        if page > 3:
            break
    logger.debug(f"Total commits found for {repo} branch {branch}: {len(all_commits)}")
    return all_commits
async def get_default_branch(session, repo):
    """Get the default branch for a repository"""
    logger.debug(f"Getting default branch for {repo}")
    url = f"https://api.github.com/repos/{ORGANIZATION}/{repo}"
    repo_info = await make_request(session, url)
    if not repo_info:
        logger.warning(f"Could not get default branch for {repo}, using 'main'")
        return "main"  # Default to main if we can't get the info
    default_branch = repo_info.get("default_branch", "main")
    logger.debug(f"Default branch for {repo}: {default_branch}")
    return default_branch
def parse_date_range(date_range_str):
    """Parse date range string into start and end datetime objects"""
    try:
        if not date_range_str:
            logger.info("No date range provided, using default time range")
            return get_default_time_range()
        # Parse date range string (format: "YYYY-MM-DD to YYYY-MM-DD" or "YYYY-MM-DD HH:MM to YYYY-MM-DD HH:MM")
        parts = date_range_str.split(" to ")
        if len(parts) != 2:
            raise ValueError("Invalid date range format. Expected 'YYYY-MM-DD to YYYY-MM-DD' or 'YYYY-MM-DD HH:MM to YYYY-MM-DD HH:MM'")
        start_str, end_str = parts
        # Try parsing with time first
        try:
            start_time = datetime.strptime(start_str.strip(), "%Y-%m-%d %H:%M")
            end_time = datetime.strptime(end_str.strip(), "%Y-%m-%d %H:%M")
        except ValueError:
            # If that fails, try parsing just the date
            start_time = datetime.strptime(start_str.strip(), "%Y-%m-%d")
            end_time = datetime.strptime(end_str.strip(), "%Y-%m-%d")
        # Convert to PST timezone
        ptz = pytz.timezone('America/Los_Angeles')
        start_time = ptz.localize(start_time)
        end_time = ptz.localize(end_time)
        logger.info(f"Parsed date range: {format_date_range(start_time, end_time)}")
        return start_time, end_time
    except Exception as e:
        logger.error(f"Error parsing date range: {e}")
        logger.info("Using default time range (6am PT yesterday to 6am PT today)")
        return get_default_time_range()
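# Note (example values are illustrative): --date-range accepts either
#   "2024-05-01 to 2024-05-02"              (dates only, interpreted as midnight PT)
# or
#   "2024-05-01 06:00 to 2024-05-02 06:00"  (dates with times, PT);
# anything that fails to parse falls back to the default 6am-to-6am window above.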
async def process_repo(session, repo, start_time_utc, end_time_utc):
    """Process a single repository and return its updates"""
    logger.info(f"Processing repository: {repo}")
    updates = {}
    try:
        # Check rate limits before processing
        await check_rate_limits(session)
        # Fetch PRs
        prs = await fetch_pull_requests(session, repo)
        logger.info(f"Found {len(prs)} pull requests in {repo}")
        pr_commit_shas = set()  # Track PR commit SHAs to avoid duplicates
        pr_commit_info = {}  # Track PR info for each commit
        for pr in prs:
            # Skip if PR was not updated in our time range
            pr_updated_at = pr.get("updated_at")
            pr_number = pr.get("number")
            pr_title = pr.get("title")
            if not pr_updated_at:
                logger.debug(f"PR #{pr_number} '{pr_title}' has no updated_at date, skipping")
                continue
            pr_date = datetime.fromisoformat(pr_updated_at.replace('Z', '+00:00'))
            in_range = start_time_utc <= pr_date <= end_time_utc
            logger.info(f"PR #{pr_number} '{pr_title}' updated at {pr_date.isoformat()} - {'IN' if in_range else 'OUT OF'} time range")
            if not in_range:
                continue
            author = pr.get("user", {}).get("login")
            if not author:
                logger.debug(f"PR #{pr_number} has no author, skipping")
                continue
            # Skip if author is in the ignore list
            if should_ignore_user(author):
                logger.info(f"Skipping PR #{pr_number} from ignored user {author}")
                continue
            # Initialize data structure for this author if not exists
            if author not in updates:
                updates[author] = {"done": [], "wip": []}
            pr_state = pr.get("state")
            pr_merged = pr.get("merged_at") is not None
            pr_html_url = pr.get("html_url")
            # Collect PR commit SHAs to avoid duplicate reporting
            pr_commits = await fetch_pr_commits(session, repo, pr_number)
            logger.info(f"PR #{pr_number} has {len(pr_commits)} commits")
            for commit in pr_commits:
                commit_sha = commit.get("sha")
                pr_commit_shas.add(commit_sha)
                pr_commit_info[commit_sha] = {
                    "pr_number": pr_number,
                    "pr_state": pr_state,
                    "pr_merged": pr_merged,
                    "pr_title": pr_title
                }
                logger.debug(f"Tracked commit {commit_sha[:7]} from PR #{pr_number} (state: {pr_state}, merged: {pr_merged})")
            # Categorize based on state and merge status
            if pr_merged:
                # Merged PRs go to done
                updates[author]["done"].append({
                    "repo": repo,
                    "type": "pr",
                    "number": pr_number,
                    "title": pr_title,
                    "url": pr_html_url
                })
                logger.info(f"Added merged PR #{pr_number} to done list for {author}")
            elif pr_state == "closed":
                # Closed but not merged PRs also go to done
                updates[author]["done"].append({
                    "repo": repo,
                    "type": "pr",
                    "number": pr_number,
                    "title": pr_title,
                    "url": pr_html_url
                })
                logger.info(f"Added closed PR #{pr_number} to done list for {author}")
            elif pr_state == "open":
                # Only open (not merged) PRs go to wip
                updates[author]["wip"].append({
                    "repo": repo,
                    "type": "pr",
                    "number": pr_number,
                    "title": pr_title,
                    "url": pr_html_url
                })
                logger.info(f"Added open PR #{pr_number} to wip list for {author}")
            # Closed but not merged PRs are now treated as done
        # Fetch non-PR commits
        try:
            default_branch = await get_default_branch(session, repo)
            logger.info(f"Fetching commits for default branch: {default_branch}")
            # Get commits in the time range
            branch_commits = await fetch_branch_commits(session, repo, branch=default_branch, since=start_time_utc)
            logger.info(f"Found {len(branch_commits)} commits in {repo}")
            for commit in branch_commits:
                # Skip if commit is not in our time range
                commit_date = commit.get("commit", {}).get("committer", {}).get("date")
                commit_sha = commit.get("sha", "")[:7]
                commit_message = commit.get("commit", {}).get("message", "").split("\n")[0]
                if not commit_date:
                    logger.debug(f"Commit {commit_sha} has no date, skipping")
                    continue
                commit_datetime = datetime.fromisoformat(commit_date.replace('Z', '+00:00'))
                in_range = start_time_utc <= commit_datetime <= end_time_utc
                if not in_range:
                    logger.debug(f"Commit {commit_sha} '{commit_message}' at {commit_datetime.isoformat()} - OUT OF time range")
                    continue
                # Check if this commit is part of a PR
                if commit.get("sha") in pr_commit_shas:
                    pr_info = pr_commit_info.get(commit.get("sha"))
                    if pr_info:
                        logger.debug(f"Commit {commit_sha} is part of PR #{pr_info['pr_number']} (state: {pr_info['pr_state']}, merged: {pr_info['pr_merged']})")
                    continue
                author = commit.get("author", {}).get("login")
                if not author:
                    # Try to get author from commit data
                    author_name = commit.get("commit", {}).get("author", {}).get("name")
                    author_email = commit.get("commit", {}).get("author", {}).get("email")
                    if author_name:
                        author = author_name
                    elif author_email:
                        author = author_email
                    else:
                        logger.debug(f"Commit {commit_sha} has no author, skipping")
                        continue
                # Skip if author is in the ignore list
                if should_ignore_user(author):
                    logger.info(f"Skipping commit {commit_sha} from ignored user {author}")
                    continue
                # Initialize data structure for this author if not exists
                if author not in updates:
                    updates[author] = {"done": [], "wip": []}
                commit_html_url = commit.get("html_url")
                # Add to done (non-PR commits on the main branch are considered done)
                updates[author]["done"].append({
                    "repo": repo,
                    "type": "commit",
                    "sha": commit_sha,
                    "title": commit_message,
                    "url": commit_html_url
                })
                logger.info(f"Added direct commit {commit_sha} to done list for {author}")
        except Exception as e:
            logger.error(f"Error processing commits for {repo}: {e}")
    except RateLimitError as e:
        logger.error(f"Rate limit error while processing {repo}: {e}")
        raise
    except Exception as e:
        logger.error(f"Error processing {repo}: {e}")
    return updates
async def main():
    if args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug("Debug logging enabled")
    async with aiohttp.ClientSession() as session:
        try:
            # Check rate limits at start
            initial_remaining = await check_rate_limits(session)
            if initial_remaining is not None and initial_remaining < RATE_LIMIT_CRITICAL:
                logger.error("GitHub API rate limit is too low to proceed!")
                raise RateLimitError("Rate limit too low to start processing")
            # Get time range from command line or use default
            start_time, end_time = parse_date_range(args.date_range)
            start_time_utc = start_time.astimezone(timezone.utc)
            end_time_utc = end_time.astimezone(timezone.utc)
            diff_hours = (end_time - start_time).total_seconds() / 3600
            logger.info(f"Fetching team updates for {format_date_range(start_time, end_time)}")
            logger.info(f"UTC time range: {start_time_utc.isoformat()} to {end_time_utc.isoformat()}")
            # Fetch all repositories
            logger.info("Fetching repositories...")
            org_repos = await fetch_repos(session)
            target_repos = REPOS if REPOS else org_repos
            logger.info(f"Found {len(target_repos)} repositories to analyze")
            # Process repositories concurrently with rate limit handling
            logger.info("Fetching pull requests and commits for each repository...")
            tasks = []
            for repo in target_repos:
                task = asyncio.create_task(process_repo(session, repo, start_time_utc, end_time_utc))
                tasks.append(task)
            # Wait for all tasks to complete, handling rate limits
            repo_updates = []
            for task in asyncio.as_completed(tasks):
                try:
                    updates = await task
                    repo_updates.append(updates)
                except RateLimitError as e:
                    logger.error(f"Rate limit error: {e}")
                    # Cancel remaining tasks
                    for t in tasks:
                        if not t.done():
                            t.cancel()
                    raise
            # Merge updates from all repositories
            all_updates = {}
            for updates in repo_updates:
                for author, author_updates in updates.items():
                    if author not in all_updates:
                        all_updates[author] = {"done": [], "wip": []}
                    all_updates[author]["done"].extend(author_updates["done"])
                    all_updates[author]["wip"].extend(author_updates["wip"])
            # Generate report
            logger.info("Generating team updates report...")
            # Use a daily-style title for a 24-hour window, a weekly title for 168 hours, otherwise a generic range title
            if diff_hours == 24:
                report = f"# [Beta] Updates to share on {datetime.now().strftime('%A')} {end_time.strftime('%Y-%m-%d %H:%M')} PT\n\n"
            elif diff_hours == 168:
                report = f"# [Beta] Weekly Team Updates ending {end_time.strftime('%Y-%m-%d %H:%M')} PT\n\n"
            else:
                report = f"# Team Updates for {format_date_range(start_time, end_time)}\n\n"
            report += "\n\n"
            report += LLM_PLEASE_FILL_PROMPT
            # Sort contributors alphabetically
            for author in sorted(all_updates.keys()):
                report += f"### @{author}\n"
                # Done items
                if all_updates[author]["done"]:
                    report += "- done:\n"
                    report += LLM_REPLACE_BELOW_PROMPT
                    for item in all_updates[author]["done"]:
                        if item["type"] == "pr":
                            report += f" - {item['title']} ([PR #{item['number']}]({item['url']}))\n"
                        else:
                            report += f" - {item['title']} ([commit #{item['sha']}]({item['url']}))\n"
                    report += LLM_REPLACE_ABOVE_PROMPT
                # WIP items
                if all_updates[author]["wip"]:
                    report += "- wip:\n"
                    report += LLM_REPLACE_BELOW_PROMPT
                    for item in all_updates[author]["wip"]:
                        if item["type"] == "pr":
                            report += f" - {item['title']} ([PR #{item['number']}]({item['url']}))\n"
                        else:
                            report += f" - {item['title']} ([commit #{item['sha']}]({item['url']}))\n"
                    report += LLM_REPLACE_ABOVE_PROMPT
                report += "\n"
            # Write to file
            output_file = args.output_path if args.output_path else "team_updates_report.md"
            with open(output_file, "w") as f:
                f.write(report)
            logger.info(f"Team updates report has been saved to {output_file}")
            logger.info(f"Content path: {os.path.abspath(output_file)}")
            # Final rate limit check
            await check_rate_limits(session)
        except RateLimitError as e:
            logger.error(f"Error: {e}")
            logger.error("Please try again later when the rate limit has reset.")
            sys.exit(1)
        except Exception as e:
            logger.error(f"Error: {e}")
            sys.exit(1)
if __name__ == "__main__":
    # Load .env file when script is run directly (already done globally)
    # load_dotenv()
    asyncio.run(main())