X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Finput_data_files.py;h=63f91972e526ab75b8843d1818d94da41b296193;hp=023d52a0d6de5be49d1eb4b4a5eddecf5bf8af08;hb=353c822cb93de90513bc8f380939e8846ac5f65d;hpb=4d5e0d3bc3e5e58ebc91d52766ab9d146c52ba58

diff --git a/resources/tools/presentation/input_data_files.py b/resources/tools/presentation/input_data_files.py
index 023d52a0d6..63f91972e5 100644
--- a/resources/tools/presentation/input_data_files.py
+++ b/resources/tools/presentation/input_data_files.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -21,7 +21,7 @@ import gzip
 
 from os import rename, mkdir
 from os.path import join
-from http.client import responses, IncompleteRead
+from http.client import responses, HTTPException
 from zipfile import ZipFile, is_zipfile, BadZipfile
 
 import requests
@@ -30,6 +30,8 @@ from requests.adapters import HTTPAdapter, Retry
 from requests.exceptions import RequestException
 from requests import codes
 
+from urllib3.exceptions import HTTPError
+
 from pal_errors import PresentationError
 
 
@@ -89,9 +91,9 @@ def _download_file(url, file_name, arch=False, verify=True, repeat=1):
         session.mount(u"https://", adapter)
         return session
 
+    success = False
     while repeat:
         repeat -= 1
-        success = False
         session = None
         try:
             logging.info(f"    Connecting to {url} ...")
@@ -137,7 +139,7 @@ def _download_file(url, file_name, arch=False, verify=True, repeat=1):
 
             success = True
             repeat = 0
-        except IncompleteRead as err:
+        except (HTTPException, HTTPError) as err:
             logging.error(f"Connection broken:\n{repr(err)}")
         except RequestException as err:
             logging.error(f"HTTP Request exception:\n{repr(err)}")
@@ -210,47 +212,44 @@ def download_and_unzip_data_file(spec, job, build, pid):
     :rtype: bool
     """
 
-    # Try to download .gz from s3_storage
+    success = False
+
     file_name = spec.input[u"file-name"]
-    url = u"{0}/{1}".format(
-        spec.environment[u'urls'][u'URL[S3_STORAGE,LOG]'],
-        spec.input[u'download-path'].format(
-            job=job, build=build[u'build'], filename=file_name
-        )
-    )
     new_name = join(
         spec.environment[u"paths"][u"DIR[WORKING,DATA]"],
         f"{job}{SEPARATOR}{build[u'build']}{SEPARATOR}{file_name}"
     )
-
-    logging.info(f"Trying to download {url}")
-
     arch = bool(spec.configuration.get(u"archive-inputs", True))
-    success, downloaded_name = _download_file(
-        url, new_name, arch=arch, verify=False, repeat=3
-    )
+    downloaded_name = u""
 
-    if not success:
-        # Try to download .gz from logs.fd.io
-        file_name = spec.input[u"file-name"]
+    # Try to download .gz from s3_storage
+    for path in spec.input[u'download-path']:
         url = u"{0}/{1}".format(
-            spec.environment[u'urls'][u'URL[NEXUS,LOG]'],
-            spec.input[u'download-path'].format(
-                job=job, build=build[u'build'], filename=file_name
-            )
+            spec.environment[u'urls'][u'URL[S3_STORAGE,LOG]'],
+            path.format(job=job, build=build[u'build'], filename=file_name)
         )
-        new_name = join(
-            spec.environment[u"paths"][u"DIR[WORKING,DATA]"],
-            f"{job}{SEPARATOR}{build[u'build']}{SEPARATOR}{file_name}"
-        )
         logging.info(f"Trying to download {url}")
-
-        arch = bool(spec.configuration.get(u"archive-inputs", True))
-        success, downloaded_name = _download_file(url, new_name, arch=arch)
+        success, downloaded_name = _download_file(
+            url, new_name, arch=arch, verify=False, repeat=3
+        )
+        if success:
+            break
 
     if not success:
+        # Try to download .gz from logs.fd.io
+        for path in spec.input[u'download-path']:
+            url = u"{0}/{1}".format(
+                spec.environment[u'urls'][u'URL[NEXUS,LOG]'],
+                path.format(job=job, build=build[u'build'], filename=file_name)
+            )
+            logging.info(f"Trying to download {url}")
+            success, downloaded_name = _download_file(
+                url, new_name, arch=arch, verify=True, repeat=3
+            )
+            if success:
+                break
+
+    if not success:
         # Try to download .gz or .zip from docs.fd.io
         file_name = (spec.input[u"file-name"], spec.input[u"zip-file-name"])
         release = re.search(REGEX_RELEASE, job).group(2)
@@ -283,28 +282,25 @@ def download_and_unzip_data_file(spec, job, build, pid):
                     xml_file.write(file_content)
                 break
 
-    if not success:
-
-        # Try to download .zip from jenkins.fd.io
-        file_name = spec.input[u"zip-file-name"]
-        download_path = spec.input[u"zip-download-path"]
-        if job.startswith(u"csit-"):
-            url = spec.environment[u"urls"][u"URL[JENKINS,CSIT]"]
-        else:
-            raise PresentationError(f"No url defined for the job {job}.")
-
-        full_name = download_path.format(
-            job=job, build=build[u"build"], filename=file_name
-        )
-        url = u"{0}/{1}".format(url, full_name)
-        new_name = join(
-            spec.environment[u"paths"][u"DIR[WORKING,DATA]"],
-            f"{job}{SEPARATOR}{build[u'build']}{SEPARATOR}{file_name}"
-        )
-
-        logging.info(f"Downloading {url}")
-
-        success, downloaded_name = _download_file(url, new_name)
+    # if not success:
+    #     # Try to download .zip from jenkins.fd.io
+    #     file_name = spec.input[u"zip-file-name"]
+    #     download_path = spec.input[u"zip-download-path"]
+    #     if job.startswith(u"csit-"):
+    #         url = spec.environment[u"urls"][u"URL[JENKINS,CSIT]"]
+    #     else:
+    #         raise PresentationError(f"No url defined for the job {job}.")
+    #
+    #     full_name = download_path.format(
+    #         job=job, build=build[u"build"], filename=file_name
+    #     )
+    #     url = u"{0}/{1}".format(url, full_name)
+    #     new_name = join(
+    #         spec.environment[u"paths"][u"DIR[WORKING,DATA]"],
+    #         f"{job}{SEPARATOR}{build[u'build']}{SEPARATOR}{file_name}"
+    #     )
+    #     logging.info(f"Downloading {url}")
+    #     success, downloaded_name = _download_file(url, new_name)
 
     if success and downloaded_name.endswith(u".zip"):
         if not is_zipfile(downloaded_name):
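
A note on the pattern this patch introduces: _download_file() keeps its retrying session and now survives any http.client.HTTPException or urllib3.exceptions.HTTPError, not only IncompleteRead, while download_and_unzip_data_file() walks a list of download paths against the S3 storage URL first and falls back to the Nexus (logs.fd.io) URL. The sketch below is a minimal, self-contained illustration of that shape, not the CSIT code itself; the helper names (_retrying_session, fetch_first_available), the placeholder URLs and paths, the 30-second timeout and the retry parameters are assumptions made for the example.

from http.client import HTTPException
import logging

import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import RequestException
from urllib3.exceptions import HTTPError
from urllib3.util.retry import Retry


def _retrying_session(retries=3):
    """Build a session that retries transient server-side errors."""
    retry = Retry(
        total=retries, backoff_factor=1, status_forcelist=(500, 502, 503, 504)
    )
    adapter = HTTPAdapter(max_retries=retry)
    session = requests.Session()
    session.mount("https://", adapter)
    session.mount("http://", adapter)
    return session


def fetch_first_available(url_bases, paths, out_file, verify=True):
    """Try every base/path combination in order; save the first success."""
    for base in url_bases:
        for path in paths:
            url = f"{base}/{path}"
            try:
                response = _retrying_session().get(url, verify=verify, timeout=30)
                if response.status_code == requests.codes.ok:
                    with open(out_file, "wb") as handle:
                        handle.write(response.content)
                    return True, url
            except (HTTPException, HTTPError, RequestException) as err:
                # A broken connection or HTTP error only moves on to the next
                # candidate URL, mirroring the broadened except clause above.
                logging.error(f"Download of {url} failed:\n{repr(err)}")
    return False, ""


# Hypothetical usage: primary (S3) log storage first, then the Nexus mirror,
# walking the same list of download paths against each base URL.
# fetch_first_available(
#     ("https://s3-logs.example.net", "https://nexus-logs.example.net"),
#     ("job-name/42/output.xml.gz",),
#     "output.xml.gz",
# )

In the patch itself the two passes also differ in certificate verification (verify=False against the S3 storage, verify=True against logs.fd.io), and the jenkins.fd.io fallback is only commented out rather than removed, so it can be restored if the other sources become unavailable.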