Skip to content
Snippets Groups Projects
Unverified Commit edf95483 authored by Omar Padron, committed by Todd Gamblin
Browse files

bugfix: fetch prefers to fetch local mirrors over remote resources (#13545)

- [x] insert at beginning of list so fetch grabs local mirrors before remote resources
- [x] update the S3FetchStrategy so that it throws a SpackError if the fetch fails.  
      Before, it was throwing URLError, which was not being caught in stage.py.
- [x] move error handling out of S3FetchStrategy and into web_util.read_from_url()
- [x] pass string instead of URLError to SpackWebError
parent 7cdb241f
Branches
Tags
No related merge requests found
...@@ -1126,7 +1126,7 @@ def fetch(self): ...@@ -1126,7 +1126,7 @@ def fetch(self):
parsed_url = url_util.parse(self.url) parsed_url = url_util.parse(self.url)
if parsed_url.scheme != 's3': if parsed_url.scheme != 's3':
raise ValueError( raise FetchError(
'S3FetchStrategy can only fetch from s3:// urls.') 'S3FetchStrategy can only fetch from s3:// urls.')
tty.msg("Fetching %s" % self.url) tty.msg("Fetching %s" % self.url)
...@@ -1392,7 +1392,7 @@ class NoCacheError(FetchError): ...@@ -1392,7 +1392,7 @@ class NoCacheError(FetchError):
class FailedDownloadError(FetchError): class FailedDownloadError(FetchError):
"""Raised wen a download fails.""" """Raised when a download fails."""
def __init__(self, url, msg=""): def __init__(self, url, msg=""):
super(FailedDownloadError, self).__init__( super(FailedDownloadError, self).__init__(
"Failed to fetch file from URL: %s" % url, msg) "Failed to fetch file from URL: %s" % url, msg)
......
...@@ -433,11 +433,9 @@ def fetch(self, mirror_only=False): ...@@ -433,11 +433,9 @@ def fetch(self, mirror_only=False):
# Add URL strategies for all the mirrors with the digest # Add URL strategies for all the mirrors with the digest
for url in urls: for url in urls:
fetchers.append(fs.from_url_scheme( fetchers.insert(
url, digest, expand=expand, extension=extension)) 0, fs.from_url_scheme(
# fetchers.insert( url, digest, expand=expand, extension=extension))
# 0, fs.URLFetchStrategy(
# url, digest, expand=expand, extension=extension))
if self.default_fetcher.cachable: if self.default_fetcher.cachable:
for rel_path in reversed(list(self.mirror_paths)): for rel_path in reversed(list(self.mirror_paths)):
......
...@@ -177,7 +177,12 @@ def read_from_url(url, accept_content_type=None): ...@@ -177,7 +177,12 @@ def read_from_url(url, accept_content_type=None):
# Do the real GET request when we know it's just HTML. # Do the real GET request when we know it's just HTML.
req.get_method = lambda: "GET" req.get_method = lambda: "GET"
response = _urlopen(req, timeout=_timeout, context=context)
try:
response = _urlopen(req, timeout=_timeout, context=context)
except URLError as err:
raise SpackWebError('Download failed: {ERROR}'.format(
ERROR=str(err)))
if accept_content_type and not is_web_url: if accept_content_type and not is_web_url:
content_type = response.headers.get('Content-type') content_type = response.headers.get('Content-type')
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment