magpie-common-exports

#!/bin/bash
#############################################################################
# Copyright (C) 2013 Lawrence Livermore National Security, LLC.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Albert Chu <chu11@llnl.gov>
# LLNL-CODE-644248
#
# This file is part of Magpie, scripts for running Hadoop on
# traditional HPC systems. For details, see <URL>.
#
# Magpie is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Magpie is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Magpie. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# Export the environment variables we promised to export in the
# documentation, along with various other variables needed internally.
#
# This file is used by the other Magpie scripts; don't edit it by hand.
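
# Note: magpie-submission-convert (sourced below) is expected to translate the
# resource manager's native job variables (e.g. Slurm's) into the generic
# MAGPIE_* variables referenced throughout this file.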
source ${MAGPIE_SCRIPTS_HOME}/magpie-submission-convert

export MAGPIE_LOCAL_JOB_DIR=${MAGPIE_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}

if [ "${MAGPIE_STARTUP_TIME}X" == "X" ]
then
    export MAGPIE_STARTUP_TIME=30
fi

if [ "${MAGPIE_SHUTDOWN_TIME}X" == "X" ]
then
    export MAGPIE_SHUTDOWN_TIME=30
fi

magpieremotecmd="${MAGPIE_REMOTE_CMD:=ssh}"

magpie_slave_core_count=`cat /proc/cpuinfo | grep processor | wc -l`
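
# Usage sketch (comment only, not executed): a job submission script can
# override the defaults above before Magpie runs, e.g.
#
#   export MAGPIE_STARTUP_TIME=60
#   export MAGPIE_REMOTE_CMD=rsh
#
# The authoritative list of tunables is in the Magpie submission templates;
# the values above are illustrative only.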
if [ "${HADOOP_SETUP}" == "yes" ]
then
export HADOOP_LOCAL_JOB_DIR=${HADOOP_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}
export HADOOP_CONF_DIR=${HADOOP_LOCAL_JOB_DIR}/conf
export HADOOP_LOG_DIR=${HADOOP_LOCAL_JOB_DIR}/log
# For jobhistoryserver
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
myusername=`whoami`
export HADOOP_YARN_USER="${myusername}"
export YARN_CONF_DIR=${HADOOP_CONF_DIR}
export YARN_LOG_DIR=${HADOOP_LOG_DIR}
# Unsure if needed, read about these online
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export YARN_COMMON_HOME=${HADOOP_HOME}
export YARN_RESOURCEMANAGER_ADDRESS="8032"
export YARN_RESOURCEMANAGER_SCHEDULER_ADDRESS="8030"
export YARN_RESOURCEMANAGER_WEBAPP_ADDRESS="8088"
export YARN_RESOURCEMANAGER_WEBAPP_HTTPS_ADDRESS="8090"
export YARN_RESOURCEMANAGER_RESOURCETRACKER_ADDRESS="8031"
export YARN_RESOURCEMANAGER_ADMIN_ADDRESS="8033"
export YARN_NODEMANAGER_LOCALIZER_ADDRESS="8040"
export YARN_NODEMANAGER_WEBAPP_ADDRESS="8042"
# In Hadoop code, default is 8020, but 54310 is the common legacy
# one used throughout the web. I'll keep the 54310 one for now.
export HADOOP_HDFS_NAMENODE_ADDRESS="54310"
export HADOOP_HDFS_NAMENODE_HTTPADDRESS="50070"
export HADOOP_HDFS_NAMENODE_SECONDARY_HTTP_ADDRESS="50090"
export HADOOP_HDFS_NAMENODE_SECONDARY_HTTPS_ADDRESS="50091"
export HADOOP_HDFS_NAMENODE_BACKUP_ADDRESS="50100"
export HADOOP_HDFS_NAMENODE_BACKUP_HTTP_ADDRESS="50100"
export HADOOP_HDFS_DATANODE_ADDRESS="50010"
export HADOOP_HDFS_DATANODE_HTTPADDRESS="50075"
export HADOOP_HDFS_DATANODE_IPCADDRESS="50020"
export HADOOP_JOBHISTORYSERVER_ADDRESS="10020"
export HADOOP_JOBHISTORYSERVER_WEBAPP_ADDRESS="19888"
export MAPRED_JOB_TRACKER_ADDRESS="54311"
export MAPRED_JOB_TRACKER_HTTPADDRESS="50030"
# achu: Why not set dfs.namenode.rpc-address and
# dfs.namenode.servicerpc-address? Unclear of why one might set
# this, internal to Hadoop 2.6.0 code, defaults to fs.defaultFS
# setting. Perhaps legacy and I'm unaware of why.
# achu: Why not set dfs.journalnode.rpc-address,
# dfs.journalnode.http-address, dfs.journalnode.https-address?
# dfs.namenode.shared.edits.dir we do not set.
# achu: Why not set yarn.nodemanager.address? Specific for node
# manager restart, we don't care about this.
# achu: Why not set yarn.timeline-service.address,
# yarn.timeline-service.webapp.address,
# yarn.timeline-service.webapp.https.address? We do not set this,
# it is disabled by default in Hadoop.
# Why check for file existance? Possible magpie-setup-core didn't create it yet
if [ -f "${HADOOP_CONF_DIR}/masters" ]
then
export HADOOP_MASTER_NODE=`head -1 ${HADOOP_CONF_DIR}/masters`
export HADOOP_NAMENODE=`head -1 ${HADOOP_CONF_DIR}/masters`
export HADOOP_NAMENODE_PORT="${HADOOP_HDFS_NAMENODE_ADDRESS}"
fi
if [ -f "${HADOOP_CONF_DIR}/slaves" ]
then
export HADOOP_SLAVE_COUNT=`cat ${HADOOP_CONF_DIR}/slaves|wc -l`
# Assume cores same on each node
export HADOOP_SLAVE_CORE_COUNT=`expr ${magpie_slave_core_count} \* ${HADOOP_SLAVE_COUNT}`
fi
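
    # Worked example (illustrative numbers only): if /proc/cpuinfo reports 16
    # processors per node and the slaves file lists 4 nodes, then
    # HADOOP_SLAVE_CORE_COUNT = 16 * 4 = 64.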
if [ "${HADOOP_SETUP_TYPE}" == "MR1" ] || [ "${HADOOP_SETUP_TYPE}" == "HDFS1" ]
then
hadoopsetupscriptprefix="bin"
hadoopcmdprefix="bin"
dfsadminscript="hadoop"
elif [ "${HADOOP_SETUP_TYPE}" == "MR2" ] || [ "${HADOOP_SETUP_TYPE}" == "HDFS2" ]
then
hadoopsetupscriptprefix="sbin"
hadoopcmdprefix="bin"
dfsadminscript="hdfs"
fi
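
    # Usage sketch (comment only): downstream Magpie scripts are expected to
    # build command paths from these prefixes, along the lines of
    #
    #   ${HADOOP_HOME}/${hadoopsetupscriptprefix}/start-dfs.sh
    #   ${HADOOP_HOME}/${hadoopcmdprefix}/${dfsadminscript} dfsadmin -report
    #
    # The exact invocations live in the other magpie-* scripts.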
if [ "${HDFS_FEDERATION_NAMENODE_COUNT}X" == "X" ]
then
export HDFS_FEDERATION_NAMENODE_COUNT=1
fi
fi
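
# Usage sketch (comment only, hypothetical job step): with the exports above,
# a user job can reach the per-job HDFS instance via, e.g.
#
#   ${HADOOP_HOME}/${hadoopcmdprefix}/hadoop --config ${HADOOP_CONF_DIR} \
#       fs -ls hdfs://${HADOOP_NAMENODE}:${HADOOP_NAMENODE_PORT}/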
if [ "${PIG_SETUP}" == "yes" ]
then
export PIG_LOCAL_JOB_DIR=${PIG_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}
export PIG_CONF_DIR=${PIG_LOCAL_JOB_DIR}/conf
pigcmdprefix="bin"
fi
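
# Usage sketch (comment only): with PIG_CONF_DIR exported above, a Pig run
# would typically look something like
#
#   ${PIG_HOME}/${pigcmdprefix}/pig myscript.pig
#
# myscript.pig is a hypothetical user script; PIG_HOME comes from the job's
# Magpie configuration.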
if [ "${HBASE_SETUP}" == "yes" ]
then
export HBASE_LOCAL_JOB_DIR=${HBASE_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}
export HBASE_CONF_DIR=${HBASE_LOCAL_JOB_DIR}/conf
export HBASE_LOG_DIR=${HBASE_LOCAL_JOB_DIR}/log
export HBASE_MASTER_PORT="60000"
export HBASE_MASTER_INFO_PORT="60010"
export HBASE_REGIONSERVER_PORT="60020"
export HBASE_REGIONSERVER_INFO_PORT="60030"
# achu: Why not hbase.rest.port? We don't enable the rest
# interface by default.
# achu: Why not hbase.status.multicast.address.port? We don't
# enable hbase.status.published, it's off by default.
# Why check for file existance? Possible magpie-setup-core didn't create it yet
if [ -f "${HBASE_CONF_DIR}/masters" ]
then
export HBASE_MASTER_NODE=`head -1 ${HBASE_CONF_DIR}/masters`
fi
if [ -f "${HBASE_CONF_DIR}/regionservers" ]
then
export HBASE_REGIONSERVER_COUNT=`cat ${HBASE_CONF_DIR}/regionservers|wc -l`
fi
hbasesetupscriptprefix="bin"
hbasecmdprefix="bin"
fi
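
# Usage sketch (comment only): an interactive shell against the per-job HBase
# instance would typically be launched as
#
#   HBASE_CONF_DIR=${HBASE_CONF_DIR} ${HBASE_HOME}/${hbasecmdprefix}/hbase shell
#
# HBASE_HOME comes from the job's Magpie configuration.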
if [ "${SPARK_SETUP}" == "yes" ]
then
export SPARK_LOCAL_JOB_DIR=${SPARK_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}
export SPARK_CONF_DIR=${SPARK_LOCAL_JOB_DIR}/conf
export SPARK_LOG_DIR=${SPARK_LOCAL_JOB_DIR}/log
export SPARK_MASTER_PORT="7077"
export SPARK_MASTER_WEBUI_PORT="8080"
export SPARK_WORKER_WEBUI_PORT="8081"
export SPARK_APPLICATION_DASHBOARD_PORT="4040"
# Why check for file existance? Possible magpie-setup-core didn't create it yet
if [ -f "${SPARK_CONF_DIR}/masters" ]
then
export SPARK_MASTER_NODE=`head -1 ${SPARK_CONF_DIR}/masters`
fi
if [ -f "${SPARK_CONF_DIR}/slaves" ]
then
export SPARK_SLAVE_COUNT=`cat ${SPARK_CONF_DIR}/slaves|wc -l`
# Assume cores same on each node
export SPARK_SLAVE_CORE_COUNT=`expr ${magpie_slave_core_count} \* ${SPARK_SLAVE_COUNT}`
fi
sparksetupscriptprefix="sbin"
sparkcmdprefix="bin"
fi
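
# Usage sketch (comment only): the standalone master URL implied by these
# exports is spark://${SPARK_MASTER_NODE}:${SPARK_MASTER_PORT}, so a submit
# command would typically look like
#
#   ${SPARK_HOME}/${sparkcmdprefix}/spark-submit \
#       --master spark://${SPARK_MASTER_NODE}:${SPARK_MASTER_PORT} myapp.py
#
# myapp.py is a hypothetical user application; SPARK_HOME comes from the
# job's Magpie configuration.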
if [ "${STORM_SETUP}" == "yes" ]
then
export STORM_LOCAL_JOB_DIR=${STORM_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}
export STORM_CONF_DIR=${STORM_LOCAL_JOB_DIR}/conf
export STORM_LOG_DIR=${STORM_LOCAL_JOB_DIR}/log
# Default is 8080, but that is same as Spark. So up by 100
export STORM_UI_PORT="8180"
# Default is 8000, for consistency to above, up by 100
export STORM_LOGVIEWER_PORT="8100"
export STORM_DRPC_PORT="3772"
export STORM_DRPC_INVOCATIONS_PORT="3773"
export STORM_DRPC_HTTP_PORT="3774"
export STORM_THRIFT_PORT="6627"
export STORM_SUPERVISOR_SLOTS_STARTING_PORT="6700"
# Why check for file existance? Possible magpie-setup-core didn't create it yet
if [ -f "${STORM_CONF_DIR}/masters" ]
then
export STORM_MASTER_NODE=`head -1 ${STORM_CONF_DIR}/masters`
export STORM_NIMBUS_HOST=${STORM_MASTER_NODE}
fi
if [ -f "${STORM_CONF_DIR}/workers" ]
then
export STORM_WORKERS_COUNT=`cat ${STORM_CONF_DIR}/workers|wc -l`
fi
stormcmdprefix="bin"
fi
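
# Usage sketch (comment only): a topology would typically be submitted to the
# per-job Nimbus along the lines of
#
#   ${STORM_HOME}/${stormcmdprefix}/storm jar mytopology.jar com.example.MyTopology
#
# mytopology.jar and com.example.MyTopology are hypothetical; STORM_HOME
# comes from the job's Magpie configuration.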
if [ "${TACHYON_SETUP}" == "yes" ]
then
export TACHYON_LOCAL_JOB_DIR=${TACHYON_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}
export TACHYON_CONF_DIR=${TACHYON_LOCAL_JOB_DIR}/conf
export TACHYON_LOG_DIR=${TACHYON_LOCAL_JOB_DIR}/log
export TACHYON_MASTER_PORT="19998"
export TACHYON_MASTER_WEB_PORT="19999"
export TACHYON_WORKER_PORT="29998"
export TACHYON_WORKER_DATA_PORT="29999"
# Why check for file existance? Possible magpie-setup-core didn't create it yet
if [ -f "${TACHYON_CONF_DIR}/masters" ]
then
export TACHYON_MASTER_NODE=`head -1 ${TACHYON_CONF_DIR}/masters`
fi
if [ -f "${TACHYON_CONF_DIR}/workers" ]
then
export TACHYON_SLAVE_COUNT=`cat ${TACHYON_CONF_DIR}/workers|wc -l`
export TACHYON_FIRST_WORKER_NODE=`head -1 ${TACHYON_CONF_DIR}/workers`
fi
tachyoncmdprefix="bin"
fi
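
# Usage sketch (comment only): clients address the per-job Tachyon master as
# tachyon://${TACHYON_MASTER_NODE}:${TACHYON_MASTER_PORT}, for example
#
#   ${TACHYON_HOME}/${tachyoncmdprefix}/tachyon tfs ls /
#
# TACHYON_HOME comes from the job's Magpie configuration.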
if [ "${ZOOKEEPER_SETUP}" == "yes" ]
then
export ZOOKEEPER_LOCAL_JOB_DIR=${ZOOKEEPER_LOCAL_DIR}/${MAGPIE_JOB_NAME}/${MAGPIE_JOB_ID}
export ZOOKEEPER_CONF_DIR=${ZOOKEEPER_LOCAL_JOB_DIR}/conf
export ZOOKEEPER_LOG_DIR=${ZOOKEEPER_LOCAL_JOB_DIR}/log
export ZOOKEEPER_CLIENT_PORT="2181"
export ZOOKEEPER_PEER_PORT="2888"
export ZOOKEEPER_LEADER_PORT="3888"
# Why check for file existance? Possible magpie-setup-core didn't create it yet
if [ -f "${ZOOKEEPER_CONF_DIR}/zookeeper_master" ]
then
export ZOOKEEPER_MASTER_NODE=`head -1 ${ZOOKEEPER_CONF_DIR}/zookeeper_master`
fi
fi
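
# Usage sketch (comment only): clients connect to the per-job ensemble on the
# standard client port, e.g.
#
#   ${ZOOKEEPER_HOME}/bin/zkCli.sh -server ${ZOOKEEPER_MASTER_NODE}:${ZOOKEEPER_CLIENT_PORT}
#
# ZOOKEEPER_HOME comes from the job's Magpie configuration.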