Here’s a quick one: fetch a list of all projects in a Jira Cloud instance, then fetch all of the issues in each project. Paginate through the results, and for each issue write the issue key and issue status to a CSV file.
import requests
import json
import base64
import csv
cloud_username = "<email>"
cloud_token = "<token>"
cloud_url = "<cloud URL>"
def credentials_encode(username, password):
    credentials_string = f'{username}:{password}'
    input_bytes = credentials_string.encode('utf-8')
    encoded_bytes = base64.b64encode(input_bytes)
    encoded_string = encoded_bytes.decode('utf-8')
    return encoded_string
encoded_cloud_credentials = credentials_encode(cloud_username, cloud_token)
# Encode the credentials that we provided
request_headers = {
    'Authorization': f'Basic {encoded_cloud_credentials}',
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    'X-Atlassian-token': 'no-check'
}
# Create a header object used for the HTTP GET requests
get_projects = requests.get(f"{cloud_url}/rest/api/latest/project", headers=request_headers)
# Get a list of all projects in the instance
projects_json = json.loads(get_projects.content)
# Parse the JSON list of projects returned by the API
with open('project_issues.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    # Create a CSV file to hold the results
    for project in projects_json:
        # Iterate through the list of projects
        start_at = 0
        max_results = 100
        # Declare the variables used for pagination
        project_key = project['key']
        # Fetch the key of the current project from the JSON
        while True:
            # Loop until pagination is complete
            search_url = f'{cloud_url}/rest/api/latest/search?jql=project="{project_key}"&maxResults={max_results}&startAt={start_at}'
            get_project_issues = requests.get(search_url, headers=request_headers)
            # Use the project key to get the issues for the project in question
            print(search_url)
            # Print the request URL so we can see where pagination is at for
            # each project, and know that the script is actually doing something
            if "does not exist for the field" in get_project_issues.text:
                # Some projects are archived, and therefore don't play nice.
                # If the system can't find the project, just end the loop
                break
            project_issues_json = json.loads(get_project_issues.content)
            # Parse the JSON results of the project issue query
            for issue in project_issues_json['issues']:
                # Iterate through the issues for this page of results
                fields = issue['fields']
                csvwriter.writerow([issue['key'], fields['status']['name']])
                # Write the issue key and issue status to the CSV we currently have open
            start_at += max_results
            # Increment the pagination
            if start_at >= project_issues_json['total']:
                # If the pagination for this project has reached the end,
                # report the total, break the loop, and start on the next project
                print(f"{project_key} has {project_issues_json['total']} issues")
                break
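As an aside, you don't strictly need the manual base64 step: requests can build the Basic auth header for you from the raw email and API token. Here's a minimal sketch of the initial project fetch using that approach, reusing the same endpoint and variables as the script above:

from requests.auth import HTTPBasicAuth

# requests base64-encodes "email:token" into the Authorization header for us
get_projects = requests.get(
    f"{cloud_url}/rest/api/latest/project",
    auth=HTTPBasicAuth(cloud_username, cloud_token),
    headers={'Accept': 'application/json'}
)
projects_json = get_projects.json()

Passing a plain (cloud_username, cloud_token) tuple to the auth parameter works too; requests treats a two-item tuple as Basic auth.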