linux命令笔记

########--nginx命令--
/usr/local/webserver/nginx/sbin/nginx -s reload            # 重新载入配置文件
/usr/local/webserver/nginx/sbin/nginx -s reopen            # 重启 Nginx
/usr/local/webserver/nginx/sbin/nginx -s stop              # 停止 Nginx

rpm -qa |grep mysql //查看mysql软件包是否已安装（查看安装路径用 rpm -ql 包名）




yum install net-tools //下载网络命令 使用ifconfig查看ip地址
//查看sql服务和进程
ps -ef|grep mysqld 
 
service mysqld status
source /etc/profile  //刷新环境配置

C:\Windows\System32\drivers\etc    //Windows hosts 文件所在目录
keytool -genkeypair -alias "tomcat" -keyalg "RSA"

Linux常用命令
echo "adsffdafas">aaaa
mkdir -p ppp/aaa  创建多级目录
pwd 显示当前的工作目录    // /myService/jdk/jdk1.8.0_201
cp file1 /目录/file2  复制并重命名（移动并重命名用 mv）
rm -rf ppp/bbb 强制删除文件和目录 -r 递归删除目录及其内容
cat  /etc/issue  查看etc下issue文件的内容
tar -zxvf dir1.tar.gz 解压文件
tar -zcvf dir1.tar.gz dir1  压缩文件
unzip 文件名.zip        //腾讯云上的CentOS解压文件的命令是
yum install -y unzip zip    //安装解压命令


vi 查看  :wq 保存并退出  :q! 不保存退出  :w 保存不退出
find /usr -name "*.ini"  //搜索文件

-----------远程服务相关命令---------------
rpm -qa | grep ssh    //查看是否安装远程服务
yum install openssh*  //安装缺失的远程服务包
vi /etc/ssh/sshd_config  //查看远程系统的配置
cat /etc/redhat-release  //查看自己的系统版本
/etc/init.d/sshd restart  //重启远程登录服务（CentOS 6 及以前）
CentOS 7 service sshd restart    //重启远程
ctrl+D;退出远程登录


----------------------防火墙相关的配置----------
firewall-cmd --state    //查看防火墙的状态firewall
systemctl start firewalld.service    //开启防火墙
systemctl stop firewalld.service     //关闭防火墙
systemctl disable firewalld.service  //禁止防火墙开机启动
service  iptables  restart    //防火墙启动
firewall-cmd --query-port=3306/tcp  //查看某个端口是否开放
firewall-cmd --add-port=3306/tcp --permanent  //开放永久端口
firewall-cmd --reload     //防火墙重新加载配置

---------端口被占用---------------------



----------mysql安装步骤----------------------------
jdbc.url=jdbc:mysql://62.234.92.2:3306/x5?useUnicode=true&characterEncoding=utf-8&zeroDateTimeBehavior=round
jdbc.username=sa
jdbc.password=98KM24@#uzi    // XYkj365@#jll  //XYKEJI365@com
GRANT ALL PRIVILEGES ON *.* TO 'sa'@'106.117.81.60' IDENTIFIED BY 'XYkj365@#jll' WITH GRANT OPTION;

cat /etc/redhat-release 查看linux版本号
https://blog.csdn.net/zhwyj1019/article/details/80274269 //彻底删除mysql
yum -y remove mysql57-community-release-el7-10.noarch    //删除noarch的包


sudo yum -y install wget   //安装wget命令,用于下载安装mysql
---安装MySQL官方的 Yum Repository
wget -i -c http://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm
--Yum Repository,大概25KB的样子,然后就可以直接yum安装了
yum -y install mysql57-community-release-el7-10.noarch.rpm
--安装MySQL服务器。
yum -y install mysql-community-server
systemctl start  mysqld.service  //启动mysql服务
systemctl status mysqld.service //查看mysql运行状态
grep "password" /var/log/mysqld.log   //查找mysql初始密码(ee7t/eUEXC_8)
mysql -uroot -p  //进入mysql数据库

SHOW VARIABLES LIKE 'validate_password%';  //查看mysql密码设置规范//未修改密码之前不能看
set global validate_password_policy=0;    //修改mysql密码设置规范
set global validate_password_length=1;
ALTER USER 'root'@'localhost' IDENTIFIED BY 'new password';//设置新密码
yum -y remove mysql57-community-release-el7-10.noarch  //卸载,防止yum之后更新
GRANT ALL PRIVILEGES ON *.* TO 'root'@'192.168.0.0' IDENTIFIED BY 'root' WITH GRANT OPTION; //开启远程连接
flush privileges; //刷新
//windows下mysql数据转储到linux的mysql下,查找出my.ini或是my.cnf文件    我的是centos7.2版本的,文件位置在 /etc/my.cnf
find / -name my.ini 或是    find / -name my.cnf    加入下面两句话然后重启服务
sql_mode=ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
lower_case_table_names=1
//重启服务 systemctl stop/start  mysqld.service

-------------zookeeper---------------

zookeeper的客户端快捷命令:zkCli.sh
create [-s] [-e] path data
ls 显示根节点
ls2 详细的信息
get 获得节点的内容
get 内容
set 设置节点的内容
watch 在某些命令下后面可添加监听
delete 删除没有子节点的节点
rmr 删除多级节点

sudo netstat -nltp | grep 2181 查看对应的进程
ps -ef|grep zookeeper    查看关于zookeeper 的进程

scp -r 目录 root@地址:/目录   //发送文件

ssh-keygen -t rsa    //免密登录
ssh-copy-id master

firewall-cmd --state    //查看防火墙的状态firewall

systemctl stop firewalld.service    //关闭防火墙

systemctl disable firewalld.service

----------------hadoop------------------------------
HA的启动
三台机器上都可以启动的:
	zkServer.sh start
    hadoop-daemon.sh start journalnode

han01    hdfs zkfc -formatZK         //第一次启动格式化
    hadoop namenode -format        //第一次启动格式化
    hadoop-daemon.sh start namenode
    
han02    hdfs namenode -bootstrapStandby
    hadoop-daemon.sh start namenode

han01    hadoop-daemons.sh start datanode

han03    start-yarn.sh

han01    hadoop-daemons.sh start zkfc

zookeeper 启动
han01:start-dfs.sh(hdfs(namenode,datanode,jouralnode))
han02:start-yarn.sh(yarn(resourcemanager,nodeManager))

//主从NameNode之间的切换 hdfs haadmin -transitionToActive nn1

//hadoop刚启动或是升级会进入安全模式,只能读文件,不能写文件
命令  hadoop dfsadmin -safemode get  查看安全模式状态
命令  hadoop dfsadmin -safemode enter    进入安全模式状态
命令  hadoop dfsadmin -safemode leave   离开安全模式



--------------------spark--------------------------

spark-submit --master spark://han01:7077 --class Dome --executor-memory 512m --total-executor-cores 1 /spark2-1.0-SNAPSHOT.jar hdfs://han02:9000/words.txt
hdfs://han02:9000/wordout

spark-shell --master spark://han01:7077 --total-executor-cores 1 --executor-memory 512m    //测试spark是否启动

//单词统计
val rdd1= sc.textFile("hdfs://han02:9000/words.txt")
val rdd2= spark.read.textFile("hdfs://han02:9000/words.txt")
rdd1.flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_).sortBy(_._2,false)
rdd2.flatMap(x=>x.split(" ")).groupByKey(x=>x).count()

sc.parallelize(new ...,num)    num为分区个数
rdd.partition.length 

--------------------hive----------------------------

//hive创建表
create table t1(id int,name String) row format delimited fields terminated by ',';

//hive 设置reduce的数量
set mapred.reduce.tasks = 6;

//向hive表中加载数据
load data inpath '/sqoop/dept' overwrite into table dept;

---------------------sqoop---------------------------

//将hive导入hdfs上
insert overwrite directory '/root/hive_test/1.txt' select * from behavior_table;

//将本地数据导入hive中
load data local inpath '/root/hive_test/a.txt' OVERWRITE INTO TABLE havior_table;

//将hive的数据导入本地中
insert overwrite local directory '/root/hive_test/1.txt' select * from behavior_table;
//sqoop导入hive
sqoop import -m 1 --connect jdbc:mysql://192.168.188.130:3306/mydatabase --username root --password root --table dept --hive-import --hive-database han1 --hive-overwrite --create-hive-table --hive-table comments --delete-target-dir

//sqoop导入hdfs
sqoop import --connect jdbc:mysql://192.168.188.130:3306/mydatabase --username root --password root --table dept --target-dir '/sqoop/dept' --fields-terminated-by '	'

---------------------kafka-------------------------

./bin/kafka-server-start.sh -daemon config/server.properties &    //启动           
创建会话
./bin/kafka-topics.sh --create --zookeeper zhiyou01:2181,zhiyou02:2181,zhiyou03:2181 --replication-factor 2 --partitions 3 --topic test3
列出会话
./bin/kafka-topics.sh --list --zookeeper localhost:2181       
启动生产者
./bin/kafka-console-producer.sh --broker-list zhiyou01:9092, zhiyou02:9092,zhiyou03:9092 --topic test
启动消费者
./bin/kafka-console-consumer.sh --bootstrap-server zhiyou01:9092, zhiyou02:9092,zhiyou03:9092 --from-beginning --topic test3
启动flume
bin/flume-ng agent --conf conf --conf-file conf/flume-kafka.conf --name a1 -Dflume.root.logger=INFO,console
原文地址:https://www.cnblogs.com/han-guang-xue/p/10942064.html