部署运维常用命令
Linux命令
常用
查看端口占用
netstat -tunlp|grep 3306
lsof -i:8363
抓网卡包
tcpdump -i enp6s0f0 host 183.224.8.165 and port 514 -w systest.cap 接收和发送 tcpdump -i enp6s0f0 dst host 183.224.8.165 and port 514 -w systest.cap 发送
-i 网卡 目的IP +端口 -w保存文件 **.cap
设备清内存
zkqa@zkqa:~$ df -h
Filesystem Size Used Avail Use% Mounted on
udev 32G 0 32G 0% /dev
tmpfs 6.3G 1.5M 6.3G 1% /run
/dev/sda2 457G 47G 387G 11% /
tmpfs 32G 84K 32G 1% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
tmpfs 32G 0 32G 0% /sys/fs/cgroup
/dev/sda1 511M 7.8M 504M 2% /boot/efi
/dev/loop0 55M 55M 0 100% /snap/core18/1705
/dev/loop1 69M 69M 0 100% /snap/lxd/14804
/dev/loop2 28M 28M 0 100% /snap/snapd/7264
tmpfs 6.3G 0 6.3G 0% /run/user/1000
zkqa@zkqa:~$ free -g
total used free shared buff/cache available
Mem: 62 55 0 0 7 6
Swap: 7 3 4
zkqa@zkqa:~$ sudo -i
[sudo] password for zkqa:
root@zkqa:~# echo 3 >/proc/sys/vm/drop_caches
root@zkqa:~# free -g
total used free shared buff/cache available
Mem: 62 55 7 0 0 6
Swap: 7 3 4
root@zkqa:~#
脚本
大文件夹切割成六个小的文件夹(生成目录有bug,需手动创建1-6文件夹)
#!/bin/bash
# Evenly distribute the regular files under the current directory into six
# sibling directories ../1 … ../6.
#
# Fixes over the previous version:
#  - the target directories are created automatically (the old commented-out
#    loop made ./1…./6 instead of ../1…../6, so they had to be made by hand);
#  - only regular files are counted/moved (`find ./` alone also lists
#    directories, including "." itself, which made mv fail);
#  - paths are read line by line and quoted, so names with spaces survive.
#    (Names containing newlines are still not supported.)

# Create the six target directories up front.
for ((i = 1; i <= 6; i++)); do
  mkdir -p "../$i"
done

# Number of files per target directory (integer division; the remainder
# spills into ../6, exactly like the original threshold chain did).
total=$(find ./ -type f | wc -l)
per_dir=$(( total / 6 ))

moved=0
while IFS= read -r path; do
  if (( per_dir > 0 )); then
    # moved/per_dir maps counts 0..per_dir-1 -> 1, per_dir..2*per_dir-1 -> 2, …
    bucket=$(( moved / per_dir + 1 ))
  else
    # Fewer than 6 files: original logic fell through to ../6.
    bucket=6
  fi
  (( bucket > 6 )) && bucket=6
  mv -v -- "$path" "../$bucket/"
  moved=$(( moved + 1 ))
done < <(find ./ -type f -print)
大文件去重
#!/bin/bash
# Deduplicate a large "|"-separated file in place.  Lines look like:
#   2ff9a2acc|8.0.0.168|Anc|6.1x64|2052|2018-01-25
# Strategy: split into ~20 chunks, sort/dedup each chunk, then merge the
# pre-sorted chunks back over the original file.
#
# Usage: ./dedup.sh <file>

# Dedup <file> in place: keeps one line per unique (field1, field3) pair
# within each chunk, preferring the highest field2 value, then drops
# duplicate lines across chunks during the final merge.
dedup_file() {
  local input=$1
  local lines chunk_lines part

  # `wc -l < file` yields the bare count — no filename to strip with sed.
  lines=$(wc -l < "$input")

  # Aim for 20 chunks, but never pass 0 to split: the old
  # `expr $lines / 20` produced 0 for files under 20 lines and split failed.
  chunk_lines=$(( lines / 20 ))
  (( chunk_lines >= 1 )) || chunk_lines=1
  echo "lines: $chunk_lines"

  # Chunk files are named part_<file>0000, part_<file>0001, …
  split -l "$chunk_lines" -d -a 4 "$input" "part_$input"
  echo "-- split done --"

  for part in part_*; do
    # Sort each chunk by field 2 descending, then keep one line per unique
    # (field1, field3) combination, writing sort_<chunk>.
    sort -f -t "|" -r -k2,2 "$part" | sort -f -t "|" -u -k1,1 -k3,3 -o "sort_$part"
  done
  echo "-- sort done --"

  # Merge the pre-sorted chunks, dropping duplicate lines, over the input.
  sort -smu sort_* > "$input"

  # Remove the intermediate chunk files.
  rm -f part_* sort_*
}

# Only run when invoked with a filename; sourcing or running without an
# argument no longer makes `wc -l` hang waiting on stdin.
if [ -n "${1:-}" ]; then
  dedup_file "$1"
fi
#!/bin/bash
# Deduplicate a large "|"-separated file in place (variant using the
# __part_ chunk prefix).  Lines look like:
#   2ff9a2acc|8.0.0.168|Anc|6.1x64|2052|2018-01-25
#
# Usage: ./dedup2.sh <file>

# Dedup <file> in place: split into ~20 chunks, within each chunk sort by
# field 2 (the 8.0.0.168 column) descending and keep one line per unique
# (field1, field3) pair, then merge the sorted chunks back over the input.
dedup_large_file() {
  local input=$1
  local lines chunk_lines part

  # `wc -l < file` yields the bare count — no filename to strip with sed.
  lines=$(wc -l < "$input")

  # Target 20 chunks; clamp to 1 so `split -l 0` never happens on files
  # shorter than 20 lines (the old `expr` version failed there).
  chunk_lines=$(( lines / 20 ))
  (( chunk_lines >= 1 )) || chunk_lines=1

  # Chunk files are named __part_<file>00, __part_<file>01, …
  split -d -l "$chunk_lines" "$input" "__part_$input"

  for part in __part_*; do
    # -u dedups on the -k keys; -o writes the result to sort_<chunk>.
    sort -f -t "|" -r -k2,2 "$part" | sort -f -t "|" -u -k1,1 -k3,3 -o "sort_$part"
  done

  # Merge the pre-sorted chunks, dropping duplicate lines, over the input.
  sort -smu sort_* > "$input"

  # Remove the split and sorted intermediate files.
  rm -f __part_* sort_*
}

# Only run when invoked with a filename.
if [ -n "${1:-}" ]; then
  dedup_large_file "$1"
fi
删除指定后缀的具有相同名称所有文件
启动
文件样式:(删除 871_183224024202_20211201160015_1.*)
-rw-r--r-- 1 wh wh 0 Dec 3 17:07 871_183224024202_20211201160015_1.gz
-rw-r--r-- 1 wh wh 0 Dec 3 17:07 871_183224024202_20211201160015_1.gz.done
-rw-r--r-- 1 wh wh 0 Dec 3 17:07 871_183224024202_20211201160015_1.gz.done.1
-rw-r--r-- 1 wh wh 0 Dec 3 17:07 871_183224024202_20211201160015_1.gz.done.2
-rw-r--r-- 1 wh wh 0 Dec 3 17:07 871_183224024202_20211201160015_1.gz.done.3
-rwxr-xr-x 1 wh wh 193 Dec 3 17:22 testFileRm.sh*
./testFileRm.sh "*.done"
#!/bin/bash
# Delete every file that shares a base name (the text before the FIRST dot)
# with any file matching the glob given as $1, e.g.:
#   ./testFileRm.sh "*.done"
# For a match like 871_..._1.gz.done this removes 871_..._1.gz,
# 871_..._1.gz.done, 871_..._1.gz.done.1, and so on.

# Remove all "<stem>.*" files for every existing file matching $1.
remove_matching_stems() {
  local pattern=$1 match stem
  echo "输入的条件为: $pattern"
  echo "-- start --"
  # Intentionally unquoted so the caller's pattern expands as a glob.
  for match in $pattern; do
    echo "$match"
    # An unmatched glob comes back as the literal pattern — skip it so we
    # never delete based on a pattern that matched nothing.
    [ "$match" != "$pattern" ] || continue
    stem=${match%%.*}        # base name up to the first dot
    # Stem is quoted (handles spaces); the trailing .* still globs.
    rm -rf -- "$stem".*
  done
  echo "-- end --"
}

remove_matching_stems "${1:-}"
通过定时任务监听文件夹移动文件到指定文件夹
#!/bin/bash
#Prevent garbled code,use English
#crontab -e
#input
#* * * * * this.sh >> value.log
#
# Move every *.gz from the source directory into one of nine numbered
# subdirectories (0-8) of the target, chosen by hashing the digits at the
# end of the file name.  Fixes over the old version: an unmatched glob is
# skipped instead of being processed literally, and the timestamp digits
# are forced to base 10 — `$(($c%9))` choked on leading zeros (bash reads
# 0-prefixed numbers as octal, so digits 8/9 raised an error).

# distribute_gz <src-dir> <dst-dir>
distribute_gz() {
  local src=$1 dst=$2 file name stamp bucket
  for file in "$src"/*.gz; do
    # Skip the literal pattern when the glob matches nothing.
    [ -e "$file" ] || continue
    name=${file//_/}          # drop underscores
    name=${name//.gz/}        # drop the .gz extension
    stamp=${name:(-10):10}    # last 10 chars — assumed all digits (timestamp tail)
    # Determine the folder (0-8) to distribute into; 10# forces decimal.
    bucket=$(( 10#$stamp % 9 ))
    # -f: file overwrite may occur, same as before.
    mv -fv "$file" "$dst/$bucket/"
  done
}

pwd
# Defaults preserve the original hard-coded paths; optional args generalize.
distribute_gz "${1:-/data/taiyue}" "${2:-/data/zkqa/dns}"
sftp推送文件(⭐️==必须配置ssh免密登录==⭐️)
#!/bin/bash
# Push every *.gz in $source to a remote host over sftp (requires
# passwordless SSH login to $server).  Each file is uploaded under a .tmp
# name and renamed after the transfer completes, so the remote consumer
# never sees a partial file.
#
# Fix over the old version: the local file is deleted ONLY when sftp exits
# cleanly — previously `rm -rf $file` ran unconditionally and could lose
# data on a failed transfer.
server=zkqa@10.171.80.188
port=22
source=/data/zkqa/dns/7
filepath=/data/zkqa/data/dns
for file in "$source"/*.gz; do
  # Skip the literal pattern when the glob matches nothing (this also
  # replaces the old `ls | wc -l` pre-count).
  [ -f "$file" ] || continue
  newfile=$(basename "$file")
  echo "$newfile"
  # Upload to a temp name, then rename: atomic hand-off on the remote side.
  if sftp -P "$port" "$server" <<EOF
pwd
put $file $filepath/$newfile.tmp
rename $filepath/$newfile.tmp $filepath/$newfile
close
bye
EOF
  then
    # Transfer succeeded — safe to drop the local copy.
    rm -f -- "$file"
  else
    # Keep the file so the next cron run can retry.
    echo "sftp failed for $file; keeping local copy" >&2
  fi
done
配置SSH密钥对
[zkqa@soar01 ~]<20211223 16:51:58>$ ls -a
. .bash_history .bash_profile .cache data dns .javacpp .pki .ssh .viminfo
.. .bash_logout .bashrc .config data-logs fw.txt logship software vector
[zkqa@soar01 ~]<20211223 16:52:05>$
[zkqa@soar01 ~]<20211223 16:52:19>$
[zkqa@soar01 ~]<20211223 16:52:20>$ ssh-copy-id -i ./.ssh/id_rsa.pub 10.171.80.183
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "./.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Authorized users only. All activity may be monitored and reported.
zkqa@10.171.80.183's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh '10.171.80.183'"
and check to make sure that only the key(s) you wanted were added.
[zkqa@soar01 ~]<20211223 16:52:50>$ ssh 10.171.80.183
...
按行切割
split -l 10000000 -d -a 4 domainregister.1.log domainregister-
sort -f -t "|" -r -k2,2 domainregister-0000 | sort -f -t "|" -u -k1,1 -k3,3 -o sort_domainregister-0000
删除
递归删除当前目录下指定名称文件logUtil.py
find ./spiders/ -name '*.gz' -type f -print -exec rm -rf {} \;
find ./spiders/ -name 'domainregister.*' -type f -print -exec rm -rf {} \;
find ./spiders/ -name 'newDomain.*' -type f -print -exec rm -rf {} \;
find ./spiders/ -name 'whoisinfo.*' -type f -print -exec rm -rf {} \;
删除一天前的文件
find ./ -name '*' -atime +1 -ls -exec rm -rf {} \;
删除1分钟前的文件
find ./ -name '*' -mmin +1 -ls -exec rm -rf {} \;
重定向
whois
find ./spiders/ -name 'whoisinfo.*' -type f -print -exec cat {} \; >> ./files/whoisinfo.log
register
find ./spiders/ -name 'domainregister.*' -type f -print -exec cat {} \; >> ./files/domainregister.log
domain
find ./spiders/ -name 'newDomain.*' -type f -print -exec cat {} \; >> ./files/newDomain.log
查找
find ./spiders/ -name 'whoisinfo.*';
计算行数
find ./spiders/ -name 'whois.*' -type f -exec wc -l {} \;
启动程序
启动python3 爬虫脚本
nohup python3 -u readFileSpiderDo* &
防火墙
ufw
sudo ufw allow proto tcp from 23.94.212.58 to any port 6379
sudo ufw allow proto tcp from 23.94.103.141 to any port 9092
sudo ufw allow 9092
1、查看firewall服务状态
systemctl status firewalld 出现Active: active (running)且高亮显示则表示是启动状态。 出现 Active: inactive (dead)灰色表示停止,看单词也行。 代表已启动
2、查看firewall的状态
firewall-cmd --state 运行中
3、开启、重启、关闭、firewalld.service服务
开启
service firewalld start
重启
service firewalld restart
关闭
service firewalld stop
4、查看防火墙规则
firewall-cmd --list-all
5、查询、开放、关闭端口
#查询所有开放的端口 firewall-cmd --list-port
查询端口是否开放
firewall-cmd --query-port=8079/tcp no代表未开放
开放80端口
firewall-cmd --permanent --add-port=8079/tcp
移除端口
❌firewall-cmd --permanent --remove-port=8080/tcp ⭐️
firewall-cmd --permanent --add-rich-rule="rule family="ipv4" source address="23.94.212.58" port protocol="tcp" port="6379" accept"
firewall-cmd --permanent --add-rich-rule="rule family="ipv4" source address="23.94.212.58" port protocol="tcp" port="16379" accept"
firewall-cmd --permanent --add-rich-rule="rule family="ipv4" source address="54.38.15.118" port protocol="tcp" port="6379" accept"
firewall-cmd --permanent --add-rich-rule="rule family="ipv4" source address="54.38.15.118" port protocol="tcp" port="16379" accept"
firewall-cmd --permanent --add-rich-rule="rule family="ipv4" source address="23.94.103.141" port protocol="tcp" port="6379" accept"
firewall-cmd --permanent --add-rich-rule="rule family="ipv4" source address="23.94.103.141" port protocol="tcp" port="16379" accept"
firewall-cmd --permanent --add-rich-rule="rule family="ipv4" source address="10.173.100.71-10.173.100.73" port protocol="tcp" port="8383" accept"
7、重启防火墙(修改配置后要重启防火墙) ==必须操作==
firewall-cmd --reload
redis cluster集群
./src/redis-cli --cluster create 23.94.212.58:6379 23.94.103.141:6379 54.38.15.118:6379 --cluster-replicas 1
#python3.8
startup_nodes = [
{'host': '23.94.212.58', 'port': '6379'},
{'host': '23.94.103.141', 'port': '6379'},
{'host': '54.38.15.118', 'port': '6379'},
]
# 创建redis cluster的连接
r = redisClusterHelper(startup_nodes)
Flink
==vim ~/.bashrc 配置环境变量==
export JAVA_HOME=/data/zkqa/software/jdk-11.0.12+7
export PATH=$JAVA_HOME/bin:$PATH
==export FLINK_HOME=/data/zkqa/software/flink-1.13.1==
==export PATH=$FLINK_HOME/bin:$PATH==
查看版本
flink -v
Version: 1.13.1, Commit ID: a7f3192
启动任务
flink run *.jar* -parameter1 value1 -parameter2 value2 ...
查看正在执行的任务
flink list
value:
Waiting for response...
------------------ Running/Restarting Jobs -------------------
20.12.2021 21:54:19 : 30db9ca3e0b0dbba49a488f8ca06e22c : accuracy-mark-task (RUNNING)
20.12.2021 21:55:35 : 754bae5c9a2636cc4470c3ccbfb39c40 : control-mark-task (RUNNING)
20.12.2021 21:55:56 : 02ea2179eb1a46974a9e5a1df70aa239 : formatter-chaitin-honepot-ynyd-task (RUNNING)
20.12.2021 21:57:52 : 5342c9a0bc097bced2f8b1a74f19e6fe : formatter-dptech-ddos-ynyd-task (RUNNING)
20.12.2021 22:01:07 : 0e110a74e4a9da64ad4a63ea285e7d6c : formatter-dptech-waf-ynyd-task (RUNNING)
20.12.2021 22:02:12 : 6c561e8f07deab7cb968bdb9ef2a21e7 : formatter-h3c-ips-ynyd-task (RUNNING)
20.12.2021 22:03:43 : 4e4973366d48629a074296d165da7526 : formatter-yn-dns-task (RUNNING)
20.12.2021 22:04:06 : 1202915d4a3bf4aea17102a9d0b08ad0 : formatter-nsfocus-ddos-ynyd-task (RUNNING)
20.12.2021 22:04:29 : 8ac75e93c7b290d850e51064612013df : formatter-nsfocus-ips-ynyd-task (RUNNING)
20.12.2021 22:05:06 : 44d5e225951b69103ab8a52955b189b3 : formatter-zkqa-http-alert-task (RUNNING)
20.12.2021 22:05:33 : 310e7cdb18fbb3ae5c42b03ce3cc032f : group-identity-mark-task (RUNNING)
--------------------------------------------------------------
No scheduled jobs.
Windows
kafka启动
bin/kafka-consumer-groups.sh --bootstrap-server 192.168.1.22:9092 --describe --group WEBCONSUMER
bin/kafka-consumer-groups.sh --bootstrap-server 192.168.1.22:9092 --describe --group DNSCONSUMER
kafka-consumer-groups.bat --bootstrap-server localhost:9092 --describe --group DNSCONSUMER
nohup bin/zookeeper-server-start.sh config/zookeeper.properties >/dev/null 2>&1 &
nohup bin/kafka-server-start.sh config/server.properties >/dev/null 2>&1 &
bin/zkServer.sh start
bin/zkServer.sh status
bin/kafka-server-start.sh -daemon config/server.properties
bin/kafka-topics.sh --zookeeper 192.168.1.22:2181 --topic website --describe
D:\develop\kafka_2.13-2.8.0\bin\windows\zookeeper-server-start.bat D:\develop\kafka_2.12-2.8.0\config\zookeeper.properties // 启动zookeeper
D:\develop\kafka_2.13-2.8.0\bin\windows\kafka-server-start.bat D:\develop\kafka_2.12-2.8.0\config\server.properties // 启动kafka0
D:\tools\kafka_2.12-2.1.0\bin\windows\kafka-server-start.bat D:\develop\kafka_2.12-2.8.0\config\server1.properties // 启动kafka1
D:\tools\kafka_2.12-2.1.0\bin\windows\kafka-server-start.bat D:\develop\kafka_2.13-2.8.0\config\server2.properties // 启动kafka2
kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic <topic名> // 测试topic(replication-factor 必须 >= 1,且需指定 topic 名称)
bin/kafka-topics.sh --create --zookeeper 192.168.1.30:2181 --replication-factor 1 --partitions 8 --topic cntddatadns // 测试集群topic linux
bin/kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 4 --topic soar_alert_test
kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 32 --topic soar_dns_test
kafka-topics.sh --describe --zookeeper localhost:2181 --topic my-test-topic // 查看那些代理在做什么
kafka-topics.sh --list --zookeeper localhost:2181 // 查看topic
kafka-console-producer.bat --broker-list localhost:9092 --topic whtestflink // 启动 生产者
kafka-console-producer.bat --broker-list localhost:9093 --topic my-test-topic // 启动 生产者
kafka-console-consumer.bat --bootstrap-server localhost:9093 --topic wh1 --from-beginning // 启动 消费者
bin/kafka-console-consumer.sh --bootstrap-server soar01:9092 --topic soar_dns_test --from-beginning
kafka-console-consumer.bat --bootstrap-server localhost:9092 --topic soar_dns_test --from-beginning
wmic process where "caption = 'java.exe' and commandline like '%server1.properties%'" get processid
DNSCONSUMER
bin/kafka-consumer-groups.sh --bootstrap-server 192.168.1.22:9092 --describe --group DNSCONSUMER
zookeeper-server-start.bat D:\develop\kafka_2.12-2.8.0\config\zookeeper.properties
kafka-server-start.bat D:\develop\kafka_2.12-2.8.0\config\server.properties
kafka-consumer-groups.bat --bootstrap-server localhost:9092 --describe --group DNSCONSUMER
nmap && masscan
nmap -sS -vv -n -Pn -p 1-65535 183.224.40.217
masscan -p 1-65535 --rate 2000 183.224.40.217
Wireshark
tcp.port == 1513 || udp.port == 1513http and ip.dst==39.130.175.65
正则表达式
括号外的==分号空格==
;\s(?=[^)]*(\(|$))
冒号外的==空格==
\s(?=([^"]*"[^"]*")*[^"]*$)