Skip to content
Snippets Groups Projects
Commit 1ea95822 authored by Alfredo Gimenez's avatar Alfredo Gimenez Committed by Todd Gamblin
Browse files

Added hadoop, spark, and variant spark+hadoop (#1833)

* Added hadoop, spark, and variant spark+hadoop

* Docstrings, dependency types, urls, copyright

* Flake8 fixes, link dependency for hadoop

* Build type for spark, env problem setting JAVA_HOME
parent b22956ba
No related branches found
No related tags found
No related merge requests found
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Alfredo Gimenez, gimenez1@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Hadoop(Package):
    """The Apache Hadoop software library is a framework that
    allows for the distributed processing of large data sets
    across clusters of computers using simple programming models.
    """

    homepage = "http://hadoop.apache.org/"
    url = "http://mirrors.ocf.berkeley.edu/apache/hadoop/common/hadoop-2.6.4/hadoop-2.6.4.tar.gz"

    version('2.6.4', '37019f13d7dcd819727be158440b9442')

    # Hadoop is distributed as prebuilt Java; the JDK is only needed at run time.
    depends_on('jdk', type='run')

    def install(self, spec, prefix):
        """Copy the prebuilt binary distribution into the install prefix.

        The tarball is already a complete layout, so installation is a
        straight copy of its top-level directories.
        """
        for subdir in ('bin', 'etc', 'include', 'lib',
                       'libexec', 'sbin', 'share'):
            install_tree(subdir, join_path(prefix, subdir))
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Alfredo Gimenez, gimenez1@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import shutil

from spack import *
class Spark(Package):
    """Apache Spark is a fast and general engine
    for large-scale data processing.
    """

    homepage = "http://spark.apache.org"
    url = "http://mirrors.ocf.berkeley.edu/apache/spark/spark-2.0.0/spark-2.0.0-bin-without-hadoop.tgz"

    variant('hadoop', default=False,
            description='Build with Hadoop')

    depends_on('jdk', type=('build', 'run'))
    depends_on('hadoop', when='+hadoop', type=('build', 'run'))

    version('2.0.0', '8a5307d973da6949a385aefb6ff747bb')
    version('1.6.2', '304394fbe2899211217f0cd9e9b2b5d9')
    version('1.6.1', 'fcf4961649f15af1fea78c882e65b001')

    def install(self, spec, prefix):
        """Copy the prebuilt binary distribution into the install prefix."""
        def install_dir(dirname):
            install_tree(dirname, join_path(prefix, dirname))

        # NOTE(review): this directory list matches the Spark 2.x layout
        # ('jars'); 1.6.x tarballs ship 'lib' instead -- confirm the 1.6
        # versions actually install before relying on them.
        install_dir('bin')
        install_dir('conf')
        install_dir('jars')
        install_dir('python')
        install_dir('R')
        install_dir('sbin')
        install_dir('yarn')

        # required for spark to recognize binary distribution
        shutil.copy('RELEASE', prefix)

    @when('+hadoop')
    def setup_environment(self, spack_env, run_env):
        """Wire Spark up to Hadoop when built with +hadoop.

        Sets JAVA_HOME for the build and exports Hadoop's jar classpath
        as SPARK_DIST_CLASSPATH in the run environment, which is how a
        'without-hadoop' Spark distribution locates the Hadoop jars.
        """
        # The hadoop executable invoked below needs JAVA_HOME in *this*
        # process's environment, so set os.environ directly for it...
        os.environ['JAVA_HOME'] = self.spec['jdk'].prefix
        # ...and record the modification through the supported Spack API
        # (rather than mutating a global) so the build gets it as well.
        spack_env.set('JAVA_HOME', self.spec['jdk'].prefix)

        # Ask hadoop itself for its full jar classpath and publish it for
        # users of the installed package.
        hadoop_bin_path = join_path(self.spec['hadoop'].prefix.bin, 'hadoop')
        hadoop_bin = Executable(hadoop_bin_path)
        hadoop_classpath = hadoop_bin('classpath', return_output=True)
        run_env.set('SPARK_DIST_CLASSPATH', hadoop_classpath)
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment.