Created
February 19, 2026 16:47
-
-
Save swombat/9f96eb6a1419380029f8c55a1944b114 to your computer and use it in GitHub Desktop.
Google Services CLI for Claude Code - Docs, Drive & Calendar access via command line
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3
"""
Google Services CLI tool for Claude Code skill.
Supports Google Docs, Drive, and Calendar.

Commands are dispatched through argparse subcommands; OAuth credentials
are cached per account under CONFIG_DIR.
"""
import argparse
import json
import os
import re
import sys
from datetime import datetime, timedelta, timezone
from pathlib import Path
# Config directory for storing credentials (shared with the gmail skill).
CONFIG_DIR = Path.home() / ".claude" / "skills" / "gmail" / "config"
# Account aliases mapping to credential files.
# Customize these with your own email addresses.
ACCOUNTS = {
    "personal": "you@gmail.com",
    "work": "you@company.com",
    "business": "you@business.io",
}
# OAuth scopes: full access to Docs, Drive, and Calendar.
# NOTE(review): the original comment also mentioned Sheets, but no Sheets
# scope is listed here — Sheets files are presumably reachable via the
# Drive scope only; confirm before adding Sheets API calls.
SCOPES = [
    'https://www.googleapis.com/auth/documents',
    'https://www.googleapis.com/auth/drive',
    'https://www.googleapis.com/auth/calendar',
]
def get_credentials(account: str):
    """Return OAuth2 credentials for *account*, running the auth flow if needed.

    *account* may be an alias from ACCOUNTS or a literal email address.
    Tokens are cached in CONFIG_DIR. An expired token is refreshed; a token
    whose refresh fails (e.g. revoked) falls back to a fresh browser flow
    instead of crashing on the refresh exception.
    """
    try:
        from google.oauth2.credentials import Credentials
        from google.auth.transport.requests import Request
        from google_auth_oauthlib.flow import InstalledAppFlow
    except ImportError:
        print("ERROR: Required packages not installed. Run:", file=sys.stderr)
        print("  pip3 install google-api-python-client google-auth-httplib2 google-auth-oauthlib", file=sys.stderr)
        sys.exit(1)
    # Resolve alias -> email; unknown aliases are treated as literal emails.
    email = ACCOUNTS.get(account, account)
    safe_name = email.replace("@", "_at_").replace(".", "_")
    client_secret = CONFIG_DIR / f"client_secret_{safe_name}.json"
    token_file = CONFIG_DIR / f"google_token_{safe_name}.json"  # Separate from gmail token
    if not client_secret.exists():
        print(f"ERROR: No credentials found for {email}", file=sys.stderr)
        print(f"Expected: {client_secret}", file=sys.stderr)
        sys.exit(1)
    creds = None
    if token_file.exists():
        creds = Credentials.from_authorized_user_file(str(token_file), SCOPES)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            try:
                creds.refresh(Request())
            except Exception:
                # BUGFIX: a revoked/expired refresh token used to raise here
                # and crash; discard it and fall through to a new flow.
                creds = None
        if not creds or not creds.valid:
            flow = InstalledAppFlow.from_client_secrets_file(str(client_secret), SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the (new or refreshed) token for subsequent runs.
        with open(token_file, 'w') as f:
            f.write(creds.to_json())
    return creds
def get_docs_service(account: str):
    """Build an authenticated Google Docs API (v1) client for *account*."""
    from googleapiclient.discovery import build
    return build('docs', 'v1', credentials=get_credentials(account))
def get_drive_service(account: str):
    """Build an authenticated Google Drive API (v3) client for *account*."""
    from googleapiclient.discovery import build
    return build('drive', 'v3', credentials=get_credentials(account))
def get_calendar_service(account: str):
    """Build an authenticated Google Calendar API (v3) client for *account*."""
    from googleapiclient.discovery import build
    return build('calendar', 'v3', credentials=get_credentials(account))
def extract_doc_id(url_or_id: str) -> str:
    """Pull the document ID out of a Google Docs URL; pass bare IDs through."""
    found = re.search(r'/document/d/([a-zA-Z0-9_-]+)', url_or_id)
    return found.group(1) if found else url_or_id
def extract_text_from_doc(doc: dict) -> str:
    """Extract plain text from a full Google Doc resource (legacy, tab-less).

    DEDUPLICATION: this previously repeated the exact paragraph/table walking
    code of extract_text_from_body(); it now delegates to it, so the text
    extraction logic lives in one place.
    """
    return extract_text_from_body(doc.get('body', {}))
| # ============ DOCS COMMANDS ============ | |
def extract_text_from_body(body: dict) -> str:
    """Flatten a document body's paragraphs and tables into plain text.

    Paragraph text runs are concatenated verbatim; each table row becomes
    one line of ' | '-separated cell texts (cell fragments are stripped
    and space-joined).
    """
    pieces = []
    for element in body.get('content', []):
        if 'paragraph' in element:
            for run in element['paragraph'].get('elements', []):
                if 'textRun' in run:
                    pieces.append(run['textRun'].get('content', ''))
        elif 'table' in element:
            # Render each table row as "cell | cell | ..." on its own line.
            for row in element['table'].get('tableRows', []):
                cells = []
                for cell in row.get('tableCells', []):
                    fragments = []
                    for item in cell.get('content', []):
                        if 'paragraph' not in item:
                            continue
                        for run in item['paragraph'].get('elements', []):
                            if 'textRun' in run:
                                fragments.append(run['textRun'].get('content', '').strip())
                    cells.append(' '.join(fragments))
                pieces.append(' | '.join(cells) + '\n')
    return ''.join(pieces)
def fetch_doc_with_tabs(account: str, doc_id: str) -> dict:
    """Fetch a Google Doc including its tabs via a raw HTTP GET.

    The Python client library doesn't expose the includeTabsContent query
    parameter, so this talks to the REST endpoint directly and returns the
    parsed JSON document resource.
    """
    from google.auth.transport.requests import AuthorizedSession
    session = AuthorizedSession(get_credentials(account))
    endpoint = f'https://docs.googleapis.com/v1/documents/{doc_id}?includeTabsContent=true'
    resp = session.get(endpoint)
    if resp.status_code != 200:
        raise Exception(f"HTTP {resp.status_code}: {resp.text}")
    return resp.json()
def cmd_doc_read(args):
    """Read a Google Doc, with support for tabs (like Gemini meeting transcripts).

    args fields used:
      doc_id    -- document ID or URL
      tab       -- optional tab selector: ID (with/without "t." prefix),
                   numeric index, or case-insensitive partial title
      list_tabs -- if set, print the available tabs instead of content
      json      -- emit JSON instead of human-readable text
    """
    doc_id = extract_doc_id(args.doc_id)
    try:
        # Request with tabs content included via direct HTTP call
        # (the Python client library lacks includeTabsContent).
        doc = fetch_doc_with_tabs(args.account, doc_id)
    except Exception as e:
        print(f"ERROR: Could not fetch document: {e}", file=sys.stderr)
        sys.exit(1)
    # Get tabs from the document
    tabs = doc.get('tabs', [])
    # If --list-tabs, just show available tabs
    if getattr(args, 'list_tabs', False):
        if args.json:
            tab_info = []
            for tab in tabs:
                props = tab.get('tabProperties', {})
                tab_info.append({
                    "tabId": props.get('tabId'),
                    "title": props.get('title'),
                    "index": props.get('index')
                })
            print(json.dumps(tab_info, indent=2))
        else:
            print(f"Document: {doc.get('title')}")
            print(f"Tabs found: {len(tabs)}")
            print("-" * 40)
            for tab in tabs:
                props = tab.get('tabProperties', {})
                print(f"  [{props.get('index')}] {props.get('title')} (id: {props.get('tabId')})")
        return
    # Determine which tab to read
    selected_tab = None
    tab_arg = getattr(args, 'tab', None)
    if tab_arg:
        # User specified a tab by index, ID, or partial title match.
        # Matching order per tab: ID first, then index, then title substring.
        for tab in tabs:
            props = tab.get('tabProperties', {})
            tab_id = props.get('tabId', '')
            tab_title = props.get('title', '')
            tab_index = props.get('index', 0)
            # Match by tab ID (including partial match with t. prefix)
            if tab_arg == tab_id or tab_arg == f"t.{tab_id}" or f"t.{tab_arg}" == tab_id:
                selected_tab = tab
                break
            # Match by index
            if tab_arg.isdigit() and int(tab_arg) == tab_index:
                selected_tab = tab
                break
            # Match by title (case insensitive partial match)
            if tab_arg.lower() in tab_title.lower():
                selected_tab = tab
                break
        if not selected_tab:
            print(f"ERROR: Tab '{tab_arg}' not found. Use --list-tabs to see available tabs.", file=sys.stderr)
            sys.exit(1)
    else:
        # Default to first tab
        if tabs:
            selected_tab = tabs[0]
    # Extract content from selected tab
    if selected_tab:
        tab_props = selected_tab.get('tabProperties', {})
        doc_tab = selected_tab.get('documentTab', {})
        body = doc_tab.get('body', {})
        content = extract_text_from_body(body)
        tab_title = tab_props.get('title', 'Untitled')
    else:
        # Fallback to old method (document without tabs)
        content = extract_text_from_doc(doc)
        tab_title = None
    if args.json:
        output = {
            "id": doc.get('documentId'),
            "title": doc.get('title'),
            "tab": tab_title,
            "tabs_available": len(tabs),
            "content": content,
        }
        print(json.dumps(output, indent=2, default=str))
    else:
        print(f"Title: {doc.get('title')}")
        print(f"ID: {doc.get('documentId')}")
        # Only mention the tab when there is actually a choice to make.
        if tab_title and len(tabs) > 1:
            print(f"Tab: {tab_title} ({len(tabs)} tabs available, use --tab to select)")
        print("=" * 60)
        print(content)
| # ============ DOC UPDATE COMMAND ============ | |
| def _parse_inline_formatting(text): | |
| """Strip **bold** markers from text, return (clean_text, spans). | |
| Each span is {'type': 'bold', 'start': int, 'end': int} referencing | |
| positions in the clean (marker-free) text. | |
| """ | |
| spans = [] | |
| result = [] | |
| i = 0 | |
| while i < len(text): | |
| if text[i:i+2] == '**': | |
| end = text.find('**', i + 2) | |
| if end != -1: | |
| start_pos = sum(len(r) for r in result) | |
| inner = text[i+2:end] | |
| result.append(inner) | |
| spans.append({'type': 'bold', 'start': start_pos, 'end': start_pos + len(inner)}) | |
| i = end + 2 | |
| continue | |
| result.append(text[i]) | |
| i += 1 | |
| return ''.join(result), spans | |
def _markdown_to_docs_requests(markdown_text, tab_id):
    """Convert markdown into (plain_text, formatting_requests).

    Supports '#'/'##'/'###' headings, '- ' bullets, and **bold** runs;
    horizontal rules ('---') are dropped. The returned requests are
    batchUpdate dicts whose ranges assume plain_text is inserted at body
    index 1 of the given tab.
    """
    style_by_kind = {
        'heading1': 'HEADING_1',
        'heading2': 'HEADING_2',
        'heading3': 'HEADING_3',
    }
    # Pass 1: classify each source line into a (kind, text) block.
    blocks = []
    for raw_line in markdown_text.rstrip('\n').split('\n'):
        line = raw_line.rstrip()
        if line.strip() == '---':
            continue  # skip horizontal rules; surrounding blank lines provide spacing
        if not line.strip():
            blocks.append(('empty', ''))
        elif line.startswith('### '):
            blocks.append(('heading3', line[4:]))
        elif line.startswith('## '):
            blocks.append(('heading2', line[3:]))
        elif line.startswith('# '):
            blocks.append(('heading1', line[2:]))
        elif line.startswith('- '):
            blocks.append(('bullet', line[2:]))
        else:
            blocks.append(('normal', line))
    # Pass 2: emit the plain text and index-accurate formatting requests.
    parts = []
    requests = []
    cursor = 1  # Google Docs body content starts at index 1
    for kind, raw in blocks:
        if kind == 'empty':
            parts.append('\n')
            cursor += 1
            continue
        text, bold_spans = _parse_inline_formatting(raw)
        start = cursor
        end = cursor + len(text) + 1  # +1 for the trailing \n
        parts.append(text + '\n')
        # Paragraph style (headings)
        if kind in style_by_kind:
            requests.append({
                'updateParagraphStyle': {
                    'range': {'startIndex': start, 'endIndex': end, 'tabId': tab_id},
                    'paragraphStyle': {'namedStyleType': style_by_kind[kind]},
                    'fields': 'namedStyleType',
                }
            })
        # Bullet lists
        if kind == 'bullet':
            requests.append({
                'createParagraphBullets': {
                    'range': {'startIndex': start, 'endIndex': end, 'tabId': tab_id},
                    'bulletPreset': 'BULLET_DISC_CIRCLE_SQUARE',
                }
            })
        # Inline bold runs, offset into this block's text
        for span in bold_spans:
            requests.append({
                'updateTextStyle': {
                    'range': {
                        'startIndex': start + span['start'],
                        'endIndex': start + span['end'],
                        'tabId': tab_id,
                    },
                    'textStyle': {'bold': True},
                    'fields': 'bold',
                }
            })
        cursor = end
    return ''.join(parts), requests
def cmd_doc_update(args):
    """Replace the content of a Google Doc tab with new text from a file or stdin.

    With --markdown, the input is converted to plain text plus Docs
    formatting requests (headings, bold, bullets). The target tab is chosen
    by --tab (ID, index, or partial title) or defaults to the first tab.
    """
    from google.auth.transport.requests import AuthorizedSession
    doc_id = extract_doc_id(args.doc_id)
    # Read new content from file or stdin
    if args.file:
        with open(args.file, 'r') as f:
            new_content = f.read()
    else:
        new_content = sys.stdin.read()
    if not new_content.strip():
        print("ERROR: No content provided. Use --file or pipe via stdin.", file=sys.stderr)
        sys.exit(1)
    # Fetch the doc to find the tab and its content length
    doc = fetch_doc_with_tabs(args.account, doc_id)
    tabs = doc.get('tabs', [])
    # Find the target tab (same matching rules as cmd_doc_read)
    selected_tab = None
    tab_arg = getattr(args, 'tab', None)
    if tab_arg:
        for tab in tabs:
            props = tab.get('tabProperties', {})
            tab_id = props.get('tabId', '')
            tab_title = props.get('title', '')
            tab_index = props.get('index', 0)
            # Match by tab ID (with or without the "t." prefix)
            if tab_arg == tab_id or tab_arg == f"t.{tab_id}" or f"t.{tab_arg}" == tab_id:
                selected_tab = tab
                break
            # Match by numeric index
            if tab_arg.isdigit() and int(tab_arg) == tab_index:
                selected_tab = tab
                break
            # Match by case-insensitive partial title
            if tab_arg.lower() in tab_title.lower():
                selected_tab = tab
                break
        if not selected_tab:
            print(f"ERROR: Tab '{tab_arg}' not found. Available tabs:", file=sys.stderr)
            for t in tabs:
                p = t.get('tabProperties', {})
                print(f"  [{p.get('index')}] {p.get('title')} (id: {p.get('tabId')})", file=sys.stderr)
            sys.exit(1)
    else:
        if tabs:
            selected_tab = tabs[0]
    # BUGFIX: a document reporting no tabs previously fell through and
    # crashed below with AttributeError on None.get(); fail clearly instead.
    if not selected_tab:
        print("ERROR: Document has no tabs to update.", file=sys.stderr)
        sys.exit(1)
    tab_props = selected_tab.get('tabProperties', {})
    tab_id = tab_props.get('tabId')
    tab_title = tab_props.get('title', 'Untitled')
    # Get the end index of existing content in this tab
    doc_tab = selected_tab.get('documentTab', {})
    body = doc_tab.get('body', {})
    body_content = body.get('content', [])
    if body_content:
        end_index = body_content[-1].get('endIndex', 1)
    else:
        end_index = 1
    # Build batchUpdate requests
    requests = []
    # Step 1: Delete all existing content (except the mandatory newline at index 1)
    # end_index <= 2 means the tab is empty (just the required trailing newline)
    if end_index > 2:
        requests.append({
            'deleteContentRange': {
                'range': {
                    'segmentId': '',
                    'startIndex': 1,
                    'endIndex': end_index - 1,
                    'tabId': tab_id,
                }
            }
        })
    # Step 2: Insert content and optionally apply formatting
    if args.markdown:
        plain_text, format_requests = _markdown_to_docs_requests(new_content, tab_id)
        requests.append({
            'insertText': {
                'location': {'segmentId': '', 'index': 1, 'tabId': tab_id},
                'text': plain_text,
            }
        })
        requests.extend(format_requests)
        char_count = len(plain_text)
    else:
        requests.append({
            'insertText': {
                'location': {'segmentId': '', 'index': 1, 'tabId': tab_id},
                'text': new_content,
            }
        })
        char_count = len(new_content)
    # Execute the batchUpdate via the REST endpoint (consistent with fetch)
    creds = get_credentials(args.account)
    session = AuthorizedSession(creds)
    url = f'https://docs.googleapis.com/v1/documents/{doc_id}:batchUpdate'
    response = session.post(url, json={'requests': requests})
    if response.status_code != 200:
        print(f"ERROR: batchUpdate failed: {response.status_code} {response.text}", file=sys.stderr)
        sys.exit(1)
    fmt_note = f" ({len(format_requests)} formatting ops)" if args.markdown else ""
    print(f"Updated tab '{tab_title}' in document '{doc.get('title')}'")
    print(f"  Wrote {char_count} characters{fmt_note}")
| # ============ COMMENTS COMMANDS ============ | |
def cmd_doc_comments(args):
    """Print (or dump as JSON) all non-deleted comments on a Google Doc,
    including replies, paging through the Drive comments API."""
    service = get_drive_service(args.account)
    doc_id = extract_doc_id(args.doc_id)
    comments = []
    page_token = None
    try:
        while True:
            page = service.comments().list(
                fileId=doc_id,
                fields="comments(id,author,content,quotedFileContent,resolved,createdTime,modifiedTime,replies)",
                pageToken=page_token,
                includeDeleted=False
            ).execute()
            comments.extend(page.get('comments', []))
            page_token = page.get('nextPageToken')
            if not page_token:
                break
    except Exception as e:
        print(f"ERROR: Could not fetch comments: {e}", file=sys.stderr)
        sys.exit(1)
    if args.json:
        print(json.dumps(comments, indent=2, default=str))
        return
    if not comments:
        print("No comments found.")
        return
    for c in comments:
        author = c.get('author', {}).get('displayName', 'Unknown')
        created = c.get('createdTime', '')[:10]
        resolved = " [RESOLVED]" if c.get('resolved') else ""
        quoted = c.get('quotedFileContent', {}).get('value', '')
        print(f"--- {author} ({created}){resolved} ---")
        if quoted:
            # Show at most 100 chars of the quoted document text.
            snippet = quoted[:100] + ('...' if len(quoted) > 100 else '')
            print(f"  > \"{snippet}\"")
        print(f"  {c.get('content', '')}")
        for reply in c.get('replies', []):
            reply_author = reply.get('author', {}).get('displayName', 'Unknown')
            print(f"  ↳ {reply_author}: {reply.get('content', '')}")
        print()
| # ============ DRIVE COMMANDS ============ | |
def cmd_drive_list(args):
    """List Google Drive files, newest first.

    Optionally filters by type (docs/sheets/folders) and by a name-contains
    query. SECURITY/CORRECTNESS FIX: the user query is escaped before being
    embedded in the Drive query string — previously a single quote in
    --query broke (or could alter) the query expression.
    """
    service = get_drive_service(args.account)
    mime_by_type = {
        "docs": "application/vnd.google-apps.document",
        "sheets": "application/vnd.google-apps.spreadsheet",
        "folders": "application/vnd.google-apps.folder",
    }
    query_parts = []
    if args.type in mime_by_type:
        query_parts.append(f"mimeType='{mime_by_type[args.type]}'")
    if args.query:
        # Escape backslashes and single quotes per Drive search query syntax.
        safe_query = args.query.replace("\\", "\\\\").replace("'", "\\'")
        query_parts.append(f"name contains '{safe_query}'")
    query = " and ".join(query_parts) if query_parts else None
    try:
        results = service.files().list(
            pageSize=args.limit,
            fields="files(id, name, mimeType, modifiedTime, webViewLink)",
            q=query,
            orderBy="modifiedTime desc"
        ).execute()
    except Exception as e:
        print(f"ERROR: Could not list files: {e}", file=sys.stderr)
        sys.exit(1)
    files = results.get('files', [])
    if args.json:
        print(json.dumps(files, indent=2, default=str))
    else:
        if not files:
            print("No files found.")
        else:
            for f in files:
                print(f"{f.get('modifiedTime', 'N/A')[:10]} | {f.get('name')}")
                print(f"  ID: {f.get('id')}")
                print(f"  Type: {f.get('mimeType')}")
                if f.get('webViewLink'):
                    print(f"  URL: {f.get('webViewLink')}")
                print()
def cmd_drive_search(args):
    """Full-text search Google Drive, newest first.

    SECURITY/CORRECTNESS FIX: the search term is escaped before being
    embedded in the Drive query string — previously a single quote in the
    query broke (or could alter) the query expression.
    """
    service = get_drive_service(args.account)
    # Escape backslashes and single quotes per Drive search query syntax.
    safe_query = args.query.replace("\\", "\\\\").replace("'", "\\'")
    try:
        results = service.files().list(
            pageSize=args.limit,
            fields="files(id, name, mimeType, modifiedTime, webViewLink)",
            q=f"fullText contains '{safe_query}'",
            orderBy="modifiedTime desc"
        ).execute()
    except Exception as e:
        print(f"ERROR: Could not search: {e}", file=sys.stderr)
        sys.exit(1)
    files = results.get('files', [])
    if args.json:
        print(json.dumps(files, indent=2, default=str))
    else:
        print(f"Found {len(files)} files matching: {args.query}\n")
        for f in files:
            print(f"{f.get('modifiedTime', 'N/A')[:10]} | {f.get('name')}")
            print(f"  ID: {f.get('id')}")
            print()
| # ============ CALENDAR COMMANDS ============ | |
def cmd_calendar_list(args):
    """List upcoming events on the primary calendar.

    --days bounds the window; --limit caps the number of events.
    MODERNIZATION FIX: uses timezone-aware datetime.now(timezone.utc)
    instead of the deprecated naive datetime.utcnow(); timestamps are
    normalized to the 'Z'-suffixed RFC3339 form the Calendar API expects.
    """
    service = get_calendar_service(args.account)
    now = datetime.now(timezone.utc)
    time_min = now.isoformat().replace('+00:00', 'Z')
    if args.days:
        time_max = (now + timedelta(days=args.days)).isoformat().replace('+00:00', 'Z')
    else:
        time_max = None
    try:
        events_result = service.events().list(
            calendarId='primary',
            timeMin=time_min,
            timeMax=time_max,
            maxResults=args.limit,
            singleEvents=True,
            orderBy='startTime'
        ).execute()
    except Exception as e:
        print(f"ERROR: Could not fetch events: {e}", file=sys.stderr)
        sys.exit(1)
    events = events_result.get('items', [])
    if args.json:
        output = []
        for event in events:
            # All-day events carry 'date' instead of 'dateTime'.
            start = event['start'].get('dateTime', event['start'].get('date'))
            end = event['end'].get('dateTime', event['end'].get('date'))
            output.append({
                "id": event.get('id'),
                "summary": event.get('summary', '(No title)'),
                "start": start,
                "end": end,
                "location": event.get('location'),
                "description": event.get('description'),
                "attendees": [a.get('email') for a in event.get('attendees', [])],
                "link": event.get('htmlLink'),
            })
        print(json.dumps(output, indent=2, default=str))
    else:
        if not events:
            print("No upcoming events found.")
        else:
            for event in events:
                start = event['start'].get('dateTime', event['start'].get('date'))
                summary = event.get('summary', '(No title)')
                print(f"{start} | {summary}")
                if event.get('location'):
                    print(f"  Location: {event.get('location')}")
                print(f"  ID: {event.get('id')}")
                print()
def cmd_calendar_today(args):
    """Show today's events on the primary calendar.

    MODERNIZATION FIX: uses timezone-aware datetime.now(timezone.utc)
    instead of the deprecated naive datetime.utcnow().
    NOTE(review): "today" is the UTC day, matching the original behavior —
    confirm whether the user's local day is intended.
    """
    service = get_calendar_service(args.account)
    now = datetime.now(timezone.utc)
    start_of_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
    end_of_day = start_of_day + timedelta(days=1)
    try:
        events_result = service.events().list(
            calendarId='primary',
            timeMin=start_of_day.isoformat().replace('+00:00', 'Z'),
            timeMax=end_of_day.isoformat().replace('+00:00', 'Z'),
            singleEvents=True,
            orderBy='startTime'
        ).execute()
    except Exception as e:
        print(f"ERROR: Could not fetch events: {e}", file=sys.stderr)
        sys.exit(1)
    events = events_result.get('items', [])
    if args.json:
        output = []
        for event in events:
            start = event['start'].get('dateTime', event['start'].get('date'))
            output.append({
                "id": event.get('id'),
                "summary": event.get('summary', '(No title)'),
                "start": start,
                "location": event.get('location'),
            })
        print(json.dumps(output, indent=2, default=str))
    else:
        if not events:
            print("No events today.")
        else:
            print(f"Today's events ({now.strftime('%Y-%m-%d')}):\n")
            for event in events:
                start = event['start'].get('dateTime', event['start'].get('date'))
                # All-day events have a 'date' (no 'T'), timed events a dateTime.
                if 'T' in start:
                    time_part = start.split('T')[1][:5]
                else:
                    time_part = "All day"
                summary = event.get('summary', '(No title)')
                print(f"  {time_part} - {summary}")
def cmd_accounts(args):
    """Print each configured alias, its email, and whether a token file exists."""
    print("Configured account aliases:")
    for alias, email in ACCOUNTS.items():
        safe = email.replace("@", "_at_").replace(".", "_")
        has_token = (CONFIG_DIR / f"google_token_{safe}.json").exists()
        label = "authenticated" if has_token else "NOT SET UP"
        print(f"  {alias}: {email} [{label}]")
def main():
    """CLI entry point: build the argument parser and dispatch to a command."""
    parser = argparse.ArgumentParser(
        description="Google Services CLI (Docs, Drive, Calendar)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--account", "-a",
        default="personal",
        help="Account to use: personal, work, business (or full email)",
    )
    parser.add_argument(
        "--json", "-j",
        action="store_true",
        help="Output in JSON format",
    )
    commands = parser.add_subparsers(dest="command", help="Commands")

    # ---- Docs commands ----
    sub = commands.add_parser("doc", help="Read a Google Doc")
    sub.add_argument("doc_id", help="Document ID or URL")
    sub.add_argument("--tab", "-t", help="Tab ID, index, or title to read (default: first tab)")
    sub.add_argument("--list-tabs", "-l", action="store_true", help="List available tabs instead of reading content")
    sub.set_defaults(func=cmd_doc_read)

    sub = commands.add_parser("doc-update", help="Replace content of a Google Doc tab")
    sub.add_argument("doc_id", help="Document ID or URL")
    sub.add_argument("--tab", "-t", help="Tab ID, index, or title to update (default: first tab)")
    sub.add_argument("--file", "-f", help="File to read content from (default: stdin)")
    sub.add_argument("--markdown", "-m", action="store_true",
                     help="Parse markdown and apply Google Docs formatting (headings, bold, bullets)")
    sub.set_defaults(func=cmd_doc_update)

    sub = commands.add_parser("comments", help="Get comments on a Google Doc")
    sub.add_argument("doc_id", help="Document ID or URL")
    sub.set_defaults(func=cmd_doc_comments)

    # ---- Drive commands ----
    sub = commands.add_parser("drive-list", help="List Drive files")
    sub.add_argument("--limit", "-n", type=int, default=10, help="Max results")
    sub.add_argument("--type", "-t", choices=["docs", "sheets", "folders"], help="Filter by type")
    sub.add_argument("--query", "-q", help="Filter by name")
    sub.set_defaults(func=cmd_drive_list)

    sub = commands.add_parser("drive-search", help="Search Drive")
    sub.add_argument("query", help="Search query")
    sub.add_argument("--limit", "-n", type=int, default=10, help="Max results")
    sub.set_defaults(func=cmd_drive_search)

    # ---- Calendar commands ----
    sub = commands.add_parser("calendar", help="List upcoming events")
    sub.add_argument("--limit", "-n", type=int, default=10, help="Max events")
    sub.add_argument("--days", "-d", type=int, help="Days to look ahead")
    sub.set_defaults(func=cmd_calendar_list)

    sub = commands.add_parser("today", help="Show today's events")
    sub.set_defaults(func=cmd_calendar_today)

    # ---- Accounts command ----
    sub = commands.add_parser("accounts", help="List configured accounts")
    sub.set_defaults(func=cmd_accounts)

    args = parser.parse_args()
    if not args.command:
        # No subcommand given: show usage and exit non-zero.
        parser.print_help()
        sys.exit(1)
    args.func(args)
# Standard script entry point: only run the CLI when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment