
3 posts tagged with "bigdata"


· 2 min read

demo1

docker run -it apache/spark:python3 /opt/spark/bin/pyspark

# >>> spark.range(1000 * 1000 * 1000).count()
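
A couple more things to try in the same PySpark shell; a minimal sketch, nothing specific to this image:

# run inside the pyspark shell started above
df = spark.range(1000 * 1000)  # one million rows, single column "id"
df.selectExpr("count(*)", "sum(id)", "avg(id)").show()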

job

docker run --restart=unless-stopped -it -d --name spark -p 4040:4040 -v $(pwd):/app -e TZ=utc-8 apache/spark:python3 tail -f
docker run --restart=unless-stopped -it -d --name spark -p 4040:4040 -v $(pwd):/app -e TZ=utc-8 wolanx/spark /opt/spark/bin/spark-submit /app/test.py

# add jars
cd /opt/spark/jars
wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.25/mysql-connector-java-8.0.25.jar
pip install requests
# spark-submit --jars /opt/spark/jars/mysql-connector-java-8.0.25.jar /app/test.py

docker exec -it --user=root spark bash
docker exec -it spark /opt/spark/bin/spark-submit /app/test.py

test.py

import ast
import time
from datetime import datetime

import requests
from pyspark.sql import SparkSession, SQLContext
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

df = (
    spark.read.format("jdbc")
    .option("url", "jdbc:mysql://xxx:3306/xxx")
    .option("dbtable", "xxx")
    .option("user", "xxx")
    .option("password", "xxx")
    .load()
)
print(df.columns)


def write_to_influxdb(data):
    # print(data)
    # return
    url = "https://ts-xxx.influxdata.tsdb.aliyuncs.com:8086/write?db=xxx&u=xxx&p=xxx&precision=s"
    headers = {'Content-Type': 'text/plain'}
    r = requests.post(url, data=data, headers=headers)
    print(r.text)


def process_row(row):
    rowMap: dict = ast.literal_eval(row['data'].replace('nan', 'None').replace(' _', '\\ _'))
    fields = ','.join([f"{k}={v}" for k, v in rowMap.items() if v is not None])
    write_to_influxdb(f"sensor_0s,SNO=#device({row['device_id']}).perf {fields}")


def doRun():
    while True:
        current_second = int(time.time()) % 60

        if current_second == 0:
            rDt = datetime.now().replace(second=0)
            df2 = df.where((df.d == rDt.day) & (df.his == rDt.strftime("%H:%M:%S"))).orderBy(
                col("id").desc())  # .limit(5)
            # print(df2.show())

            df2.foreach(process_row)

        time.sleep(1)


doRun()
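
To make the InfluxDB write concrete, here is a standalone sketch of the line-protocol string that process_row builds, using a made-up row (device_id 42, data "{'temp': 21.5, 'rpm': 900}"); the measurement and tag layout are taken from the code above:

import ast

row = {"device_id": 42, "data": "{'temp': 21.5, 'rpm': 900}"}
row_map = ast.literal_eval(row["data"].replace("nan", "None"))
fields = ",".join(f"{k}={v}" for k, v in row_map.items() if v is not None)
line = f"sensor_0s,SNO=#device({row['device_id']}).perf {fields}"
print(line)  # sensor_0s,SNO=#device(42).perf temp=21.5,rpm=900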

cluster

spark-shell --master local

· One min read

https://ci.apache.org/projects/flink/flink-docs-stable/zh/

tar -xzf flink-1.13.0-bin-scala_2.11.tgz
cd flink-1.13.0


./bin/flink run -m flink.cc5ee3108d340437b956b5d18bf1a9ba7.cn-shanghai.alicontainer.com -py asdf.py
./bin/flink run -m flink.cc5ee3108d340437b956b5d18bf1a9ba7.cn-shanghai.alicontainer.com:80 examples/streaming/WordCount.jar

./bin/flink run -pyfs ./examples/python/table/batch -pym word_count
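
The bundled word_count example is essentially a grouped count in the Table API. A minimal sketch of the same idea (my own example, not the shipped word_count.py; assumes Flink 1.13 with the apache-flink Python package installed):

from pyflink.table import EnvironmentSettings, TableEnvironment
from pyflink.table.expressions import col, lit

# batch table environment
t_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode())

# a tiny in-memory table with one column "word"
words = t_env.from_elements([("hello",), ("flink",), ("hello",)], ["word"])

# group by word and count occurrences
result = words.group_by(col("word")).select(col("word"), lit(1).count.alias("cnt"))
result.execute().print()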

· 3 min read

Download links

centos http://www.centoscn.com/CentosSoft/
jdk1.7 http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html
hadoop http://apache.fayea.com/hadoop/common/

Configure eth0
cat /etc/sysconfig/network-scripts/ifcfg-eth0

DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.1.101
NETMASK=255.255.255.0
GATEWAY=192.168.1.1

Disable iptables
service iptables stop
chkconfig iptables off

# disable SELinux
/etc/selinux/config
SELINUX=disabled

Configure DNS
/etc/resolv.conf
nameserver 180.168.255.118

rm -f /etc/udev/rules.d/70-persistent-net.rules # MAC address config

mount -t vboxsf BaiduShare /mnt/bdshare/

JDK installation

rpm -ivh jdk-7u79-linux-x64.rpm
cd /usr/java/
vi ~/.bashrc
export JAVA_HOME=/usr/java/latest
export PATH=$PATH:$JAVA_HOME/bin
env | grep JAVA
reboot
env | grep JAVA

02. HDFS standalone and cluster setup

Hadoop download and install

wget http://apache.fayea.com/hadoop/common/hadoop-2.5.2/hadoop-2.5.2.tar.gz
md5sum hadoop-2.5.2.tar.gz | tr "a-z" "A-Z"
tar -zxvf hadoop-2.5.2.tar.gz
ln -sf hadoop-2.5.2 hadoop # symlink

vi ~/.bashrc
export JAVA_HOME=/usr/java/latest
export HADOOP_HOME=/bao/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

Passwordless SSH login

ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys

For configuration details, see the official docs

Single Node Setup

C:\Windows\System32\drivers\etc\hosts
vi /etc/hosts
192.168.1.201 hadoop1

vi core-site.xml # /bao/hadoop/etc/hadoop
<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://hadoop1:9000</value>
    </property>
</configuration>

vi hdfs-site.xml # /bao/hadoop/etc/hadoop
<configuration>
    <property>
        <name>dfs.name.dir</name>
        <value>/hdata/namenode</value>
    </property>
    <property>
        <name>dfs.data.dir</name>
        <value>/hdata/datanode</value>
    </property>
    <property>
        <name>dfs.tmp.dir</name>
        <value>/hdata/tmp</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>1</value><!-- if set to 0: Requested replication 0 is less than the required minimum 1 -->
    </property>
</configuration>

vi slaves # /bao/hadoop/etc/hadoop
hadoop1

Initialize and start

hdfs namenode -format

start-dfs.sh
stop-dfs.sh

jps # check processes
netstat -anp | grep java

log

cd /bao/hadoop/logs
tail -n50 -f hadoop-root-namenode-localhost.localdomain.log

CRUD

hdfs dfs -put /bao/hadoop-2.5.2.tar.gz /
hdfs dfs -ls /
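
You can also hit HDFS from code instead of the CLI: the NameNode web port (50070, also used for the dfshealth page below) serves the WebHDFS REST API. A minimal Python sketch, assuming WebHDFS is enabled (dfs.webhdfs.enabled) and the hadoop1 hostname from /etc/hosts:

import requests

BASE = "http://hadoop1:50070/webhdfs/v1"

# list the root directory we just -put the tarball into
r = requests.get(f"{BASE}/", params={"op": "LISTSTATUS"})
for f in r.json()["FileStatuses"]["FileStatus"]:
    print(f["pathSuffix"], f["type"], f["length"])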

Cluster

Clone the VM as a second node, hadoop2

hostnamectl set-hostname hadoop2

vi hdfs-site.xml # /bao/hadoop/etc/hadoop
<configuration>
    <property>
        <name>dfs.name.dir</name>
        <value>/hdata/namenode</value>
    </property>
    <property>
        <name>dfs.data.dir</name>
        <value>/hdata/datanode</value>
    </property>
    <property>
        <name>dfs.tmp.dir</name>
        <value>/hdata/tmp</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value><!-- if set to 0: Requested replication 0 is less than the required minimum 1 -->
    </property>
</configuration>

vi slaves # /bao/hadoop/etc/hadoop
hadoop1
hadoop2

Initialize and start

cd /hdata
rm -rf *

stop-dfs.sh
hdfs namenode -format
start-dfs.sh

# http://192.168.1.201:50070/dfshealth.html#tab-datanode
Check that both DataNodes show up
#hadoop checknative -a # check native libraries
#warnings here mean the native libraries need to be built from source, as below

yum -y install svn ncurses-devel gcc*
yum -y install lzo-devel zlib-devel autoconf automake libtool cmake openssl-devel

# install protobuf (without it, the build cannot complete)
Hadoop uses Protocol Buffers for communication, so download and install protobuf-2.5.0.tar.gz
cd protobuf-2.5.0
./configure
make
make install
protoc --version

cd /hadoop-2.5.2-src
mvn package -Pdist,native -DskipTests -Dtar

export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR="/app/hadoop/lib/native"
export HADOOP_OPTS="-Djava.library.path=/app/hadoop/lib/native"

03. HDFS application: cloud storage system, part 1

windows java

eclipse-jee-luna-R-win32-x86_64.zip
jdk-7u45-nb-7_4-windows-x64.exe

A:\eclipse\eclipse.ini
-vm
C:\Program Files\Java\jdk1.7.0_45\bin

jetty

Help -> Eclipse Marketplace
Search jetty
Run-Jetty-Run install

maven

File -> New -> Maven Project
maven-archetype-webapp

web -> Build Path -> Configure Build Path
Java Build Path -> Source -> Remove the 2 missing entries

web -> Source Folder
src/main/java
src/main/test

web -> Build Path -> Configure Build Path
Java Build Path -> Output folder -> target/classes * 3

Preferences -> jre -> Installed JREs -> Check version

web -> Build Path -> Configure Build Path
Libraries -> JRE -> Edit -> Workspace default JRE

06. HDFS application: cloud storage system, part 4