diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py
index 393e3af9d12112eddbacfc830ffa2054738a452d..5a57703d27bc56d7b5f3f471daeed5130c8194b9 100644
--- a/lib/spack/spack/fetch_strategy.py
+++ b/lib/spack/spack/fetch_strategy.py
@@ -1126,7 +1126,7 @@ def fetch(self):
 
         parsed_url = url_util.parse(self.url)
         if parsed_url.scheme != 's3':
-            raise ValueError(
+            raise FetchError(
                 'S3FetchStrategy can only fetch from s3:// urls.')
 
         tty.msg("Fetching %s" % self.url)
@@ -1392,7 +1392,7 @@ class NoCacheError(FetchError):
 
 
 class FailedDownloadError(FetchError):
-    """Raised wen a download fails."""
+    """Raised when a download fails."""
     def __init__(self, url, msg=""):
         super(FailedDownloadError, self).__init__(
             "Failed to fetch file from URL: %s" % url, msg)
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index 7869c5f863be795528b0a7b8a7f846126dad47af..d2dd3e6e7adee6d0aa1e6c88ff45a219f05646fc 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -433,11 +433,9 @@ def fetch(self, mirror_only=False):
 
             # Add URL strategies for all the mirrors with the digest
             for url in urls:
-                fetchers.append(fs.from_url_scheme(
-                    url, digest, expand=expand, extension=extension))
-                # fetchers.insert(
-                #     0, fs.URLFetchStrategy(
-                #         url, digest, expand=expand, extension=extension))
+                fetchers.insert(
+                    0, fs.from_url_scheme(
+                        url, digest, expand=expand, extension=extension))
 
             if self.default_fetcher.cachable:
                 for rel_path in reversed(list(self.mirror_paths)):
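
# A minimal sketch (hypothetical names, not Spack's real Stage code) of the
# ordering change above: mirror fetchers are now pushed onto the front of
# the candidate list with insert(0, ...), so mirrors are tried before the
# package's default fetcher instead of after it, as append() would do.

def build_fetcher_list(default_fetcher, mirror_urls):
    fetchers = [default_fetcher]
    for url in mirror_urls:
        # Each insert(0, ...) lands ahead of everything added so far,
        # including the default fetcher.
        fetchers.insert(0, 'fetch ' + url)
    return fetchers


print(build_fetcher_list('fetch upstream',
                         ['https://mirror-a/pkg', 's3://mirror-b/pkg']))
# -> ['fetch s3://mirror-b/pkg', 'fetch https://mirror-a/pkg', 'fetch upstream']
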
diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py
index f2afe769c697f15ce2d16ca54f607b58f9a40afb..1fe58d64152e6804219395efcb543bfcfe719d0b 100644
--- a/lib/spack/spack/util/web.py
+++ b/lib/spack/spack/util/web.py
@@ -177,7 +177,12 @@ def read_from_url(url, accept_content_type=None):
 
     # Do the real GET request when we know it's just HTML.
     req.get_method = lambda: "GET"
-    response = _urlopen(req, timeout=_timeout, context=context)
+
+    try:
+        response = _urlopen(req, timeout=_timeout, context=context)
+    except URLError as err:
+        raise SpackWebError('Download failed: {ERROR}'.format(
+            ERROR=str(err)))
 
     if accept_content_type and not is_web_url:
         content_type = response.headers.get('Content-type')
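
# A minimal sketch (Python 3 stdlib imports, not the vendored six imports
# Spack uses) of the error translation added above: a URLError raised by
# urlopen is caught and re-raised as a single Spack-level exception type,
# so callers of read_from_url() only have to handle SpackWebError rather
# than every low-level urllib failure mode.

from urllib.error import URLError
from urllib.request import urlopen


class SpackWebError(Exception):
    """Simplified stand-in for spack.util.web.SpackWebError."""


def read_from_url(url, timeout=10):
    try:
        return urlopen(url, timeout=timeout)
    except URLError as err:
        raise SpackWebError('Download failed: {ERROR}'.format(ERROR=str(err)))


try:
    # The reserved .invalid TLD guarantees a DNS failure, which urlopen
    # surfaces as URLError and the wrapper converts to SpackWebError.
    read_from_url('http://nonexistent.invalid/')
except SpackWebError as err:
    print(err)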