Part 1: HTTP-Connection-Manager
- httproute-simple-match
cd servicemesh_in_practise/HTTP-Connection-Manager/httproute-simple-match
# start
docker-compose up
# verify
curl 172.31.50.10 # default rule
Hello from App behind Envoy! Hostname: ed9dbef5686c, Address: 172.31.50.5!
curl -H "Host: ilinux.io" 172.31.50.10 # matches the Host-header (virtual host) rule
Hello from App behind Envoy! Hostname: acb8353a7ebc, Address: 172.31.50.4!
curl -H "Host: ilinux.io" 172.31.50.10/service/blue # matches the hostname plus service-path rule
Hello from App behind Envoy (service light_blue)! hostname: 733ad649eb51 resolved hostname: 172.31.50.6
curl -I -H "Host: ilinux.io" 172.31.50.10/service/helloblue # matches the hostname plus service-path redirect rule
HTTP/1.1 301 Moved Permanently
location: http://ilinux.io/service/blue
date: Thu, 28 Apr 20xx xx:19:52 GMT
server: envoy
transfer-encoding: chunked
curl -I -H "Host: ilinux.io" 172.31.50.10/service/yellow
This page will be provided soon later.
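These responses map to per-virtual-host route rules. A minimal sketch of what such a route_config can look like in Envoy's v3 route API (the cluster names and the yellow rule's exact status are assumptions inferred from the output above, not the lab's actual file):
route_config:
  virtual_hosts:
  - name: ilinux
    domains: ["ilinux.io"]
    routes:
    - match:
        path: "/service/blue"            # hostname plus service-path rule
      route:
        cluster: light_blue              # assumed cluster name
    - match:
        prefix: "/service/helloblue"     # redirect rule
      redirect:
        path_redirect: "/service/blue"
    - match:
        path: "/service/yellow"          # fixed-text direct response
      direct_response:
        status: 200
        body:
          inline_string: "This page will be provided soon later.\n"
    - match:
        prefix: "/"
      route:
        cluster: default_service         # assumed cluster name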
- httproute-headers-match
cd servicemesh_in_practise/HTTP-Connection-Manager/httproute-headers-match
# start
docker-compose up
# verify
curl 172.31.52.10 # default rule
iKubernetes demoapp v1.0 !! ClientIP: 172.31.52.10, ServerName: demoapp-v1.0-1, ServerIP: 172.31.52.5!
curl -H "X-Canary: true" 172.31.52.10 # matches the X-Canary header rule
iKubernetes demoapp v1.2 !! ClientIP: 172.31.52.10, ServerName: demoapp-v1.2-1, ServerIP: 172.31.52.2!
curl 172.31.52.10?username=vip_linux # matches the query-string prefix rule
iKubernetes demoapp v1.1 !! ClientIP: 172.31.52.10, ServerName: demoapp-v1.1-1, ServerIP: 172.31.52.6!
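The header and query-string matches look roughly like this in the route config (cluster names are assumptions; the demo appears to match X-Canary exactly and the username parameter by the vip_ prefix):
routes:
- match:
    prefix: "/"
    headers:
    - name: X-Canary
      string_match:
        exact: "true"            # canary requests go to v1.2
  route:
    cluster: demoappv12
- match:
    prefix: "/"
    query_parameters:
    - name: username
      string_match:
        prefix: "vip_"           # VIP users go to v1.1
  route:
    cluster: demoappv11
- match:
    prefix: "/"                  # default rule
  route:
    cluster: demoappv10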
- http-traffic-shifting
cd servicemesh_in_practise/HTTP-Connection-Manager/http-traffic-shifting
# start
docker-compose up
# verify
./send-request.sh 172.31.55.10 # observe the traffic ratio between the old and new versions
curl -XPOST http://172.31.55.10:9901/runtime_modify?routing.traffic_shift.demoapp=90 # shift 10% of traffic to the new version
OK
curl -XPOST http://172.31.55.10:9901/runtime_modify?routing.traffic_shift.demoapp=0 # shift all traffic to the new version
OK
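The runtime key works because the old version's route is guarded by a runtime_fraction match: requests inside the fraction stay on v1.0, the rest fall through to the v1.1 route. A minimal sketch (cluster names assumed):
routes:
- match:
    prefix: "/"
    runtime_fraction:
      default_value:
        numerator: 100
        denominator: HUNDRED
      runtime_key: routing.traffic_shift.demoapp   # the key modified via the admin API
  route:
    cluster: demoappv10
- match:
    prefix: "/"
  route:
    cluster: demoappv11                            # receives the remaining traffic
Setting the key to 90 therefore keeps 90% of traffic on v1.0 and shifts 10% to v1.1; setting it to 0 shifts everything.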
- http-traffic-splitting
cd servicemesh_in_practise/HTTP-Connection-Manager/http-traffic-splitting
# start
docker-compose up
# verify
./send-request.sh 172.31.57.10 # observe the traffic ratio between the old and new versions
curl -XPOST 'http://172.31.57.10:9901/runtime_modify?routing.traffic_split.demoapp.demoappv10=10&routing.traffic_split.demoapp.demoappv11=90' # shift 90% of traffic to the new version
OK
curl -XPOST 'http://172.31.57.10:9901/runtime_modify?routing.traffic_split.demoapp.demoappv10=0&routing.traffic_split.demoapp.demoappv11=100' # shift all traffic to the new version
OK
Note: the traffic weights of the two clusters must add up to 100.
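Unlike shifting, splitting uses a single route with weighted_clusters; runtime_key_prefix is what makes the per-cluster weights adjustable through the admin API. A minimal sketch (cluster names assumed):
routes:
- match:
    prefix: "/"
  route:
    weighted_clusters:
      clusters:
      - name: demoappv10
        weight: 90
      - name: demoappv11
        weight: 10
      runtime_key_prefix: routing.traffic_split.demoapp   # yields ...demoapp.demoappv10 etc.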
- http-request-mirror
cd servicemesh_in_practise/HTTP-Connection-Manager/http-request-mirror
# start
docker-compose up
# verify
./send-request.sh 172.31.60.10 # send requests; by default 20% of traffic is mirrored to the test version, visible in its logs
curl -XPOST 'http://172.31.60.10:9901/runtime_modify?routing.request_mirror.demoapp=50' # mirror 50% of traffic to the test version
OK
curl -XPOST 'http://172.31.60.10:9901/runtime_modify?routing.request_mirror.demoapp=100' # mirror all traffic to the test version
OK
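Mirroring is configured on the main route with request_mirror_policies; responses from the shadow cluster are discarded, so clients only ever see the live version. A minimal sketch (cluster names assumed):
routes:
- match:
    prefix: "/"
  route:
    cluster: demoappv10                  # the live version answers the client
    request_mirror_policies:
    - cluster: demoappv11                # the test version receives copies
      runtime_fraction:
        default_value:
          numerator: 20                  # mirror 20% by default
          denominator: HUNDRED
        runtime_key: routing.request_mirror.demoapp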
- fault-injection
cd servicemesh_in_practise/HTTP-Connection-Manager/fault-injection
# start
docker-compose up
# verify
curl -w"@curl_format.txt" -o /dev/null -s "http://172.31.62.10/service/red" # a delay fault is injected into 10% of requests; affected requests take 10s longer
time_namelookup: 0.000023
time_connect: 0.000195
time_appconnect: 0.000000
time_pretransfer: 0.000496
time_redirect: 0.000000
time_starttransfer: 10.012071
----------
time_total: 10.012236
curl -w '%{http_code}\n' -o /dev/null -s "http://172.31.62.10/service/blue" # an abort fault is injected into 10% of requests; affected requests get a 503 response
503
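Both faults come from the envoy.filters.http.fault HTTP filter. In this demo the delay and the abort are scoped to different paths; a combined single-filter sketch of the two fault types, with percentages per the comments above (an illustration, not the lab's exact file):
http_filters:
- name: envoy.filters.http.fault
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
    delay:
      fixed_delay: 10s                 # the 10s delay seen on /service/red
      percentage:
        numerator: 10
        denominator: HUNDRED
    abort:
      http_status: 503                 # the 503 seen on /service/blue
      percentage:
        numerator: 10
        denominator: HUNDRED
- name: envoy.filters.http.router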
- timeout-retries
cd servicemesh_in_practise/HTTP-Connection-Manager/timeout-retries
# start
docker-compose up
# verify
curl -w"@curl_format.txt" -o /dev/null -s "http://172.31.65.10/service/red" # a delay fault is injected into 50% of requests; with the route timeout set to 1s, faulted requests return after 1s
time_namelookup: 0.000022
time_connect: 0.000158
time_appconnect: 0.000000
time_pretransfer: 0.000195
time_redirect: 0.000000
time_starttransfer: 1.001305
----------
time_total: 1.001412
./send-requests.sh http://172.31.65.10/service/blue 100 # an abort fault (503) is injected into 50% of requests; with retries enabled, 503s become far less frequent
200
200
200
200
503
200
200
200
200
200
200
200
200
503
200
./send-requests.sh http://172.31.65.10/service/colors 100
200
200
200
200
200
200
200
504 # 504 is returned when the upstream request times out
200
200
200
504
200
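Both behaviors are plain route options: a per-route timeout caps the injected delay, and a retry_policy re-sends aborted requests. A minimal sketch (cluster names and retry count are assumptions):
routes:
- match:
    prefix: "/service/red"
  route:
    cluster: red
    timeout: 1s                        # cuts the 10s fault short after 1s
- match:
    prefix: "/service/blue"
  route:
    cluster: blue
    retry_policy:
      retry_on: "5xx"                  # retry the 503s from the abort fault
      num_retries: 3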
Part 2: Monitoring-and-Tracing
- monitoring
cd servicemesh_in_practise/Monitoring-and-Tracing/monitoring
# start
docker-compose up
# verify
while true; do curl 172.31.70.10; sleep 0.$RANDOM; done
Prometheus UI (screenshot)
Grafana UI (screenshot)
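Prometheus gets its data by scraping Envoy's admin port, which serves metrics in Prometheus format at /stats/prometheus. A minimal scrape-job sketch (the target address is taken from this lab's subnet; the lab's actual prometheus.yml may differ):
scrape_configs:
- job_name: envoy
  metrics_path: /stats/prometheus      # Envoy's admin endpoint for Prometheus metrics
  static_configs:
  - targets: ['172.31.70.10:9901']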
- access-log
cd servicemesh_in_practise/Monitoring-and-Tracing/access-log
# start
docker-compose up
# verify
curl 172.31.73.10 # inspect the access log (JSON format)
Edit the front_envoy.yaml file: comment out the json_format block and enable text_format.
# restart
docker-compose up
# verify
curl 172.31.73.10 # inspect the access log (text format)
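The two formats live side by side in the HTTP connection manager's access_log stanza, so switching is just a matter of which block is commented out. A minimal sketch (field values are illustrative, not the lab's exact file):
access_log:
- name: envoy.access_loggers.file
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
    path: /dev/stdout
    log_format:
      # json_format:                   # structured output, one JSON object per request
      #   status: "%RESPONSE_CODE%"
      #   path: "%REQ(:PATH)%"
      text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(:PATH)% %PROTOCOL%\" %RESPONSE_CODE%\n"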
- accesslog-with-efk
cd servicemesh_in_practise/Monitoring-and-Tracing/accesslog-with-efk
# start
docker-compose up
# verify
while true; do curl 172.31.76.10/service/colors; sleep 0.$RANDOM; done
# check Elasticsearch
curl 172.31.76.15:9200
{
  "name" : "myes01",
  "cluster_name" : "myes",
  "cluster_uuid" : "H_iE6pcgSgixypqBZFrzuA",
  "version" : {
    "number" : "7.14.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
    "build_date" : "2021-09-15T10:18:09.722761972Z",
    "build_snapshot" : false,
    "lucene_version" : "8.9.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
# list the ES indices
curl 172.31.76.15:9200/_cat/indices
green open .geoip_databases ysusGG6bQrSQie3VgRDhuw 1 0 40 0 37.7mb 37.7mb
yellow open filebeat-7.14.2-20xx.xx.28-000001 KC1dWFZ4TtOvFXudTtl_gw 1 1 0 0 208b 208b
green open .apm-custom-link EL91AX5VShGzoKJZqjhTow 1 0 0 0 208b 208b
green open .kibana_task_manager_7.14.2_001 Z1h_EiWdSTalLMGbu_R_lA 1 0 14 183 77.2kb 77.2kb
green open .apm-agent-configuration 9B_-D92yQaS17Di44e4gFw 1 0 0 0 208b 208b
green open .kibana_7.14.2_001 P1tti9iQS0iTjfoqlpiGww 1 0 10 0 2.3mb 2.3mb
yellow open filebeat-2022.04.28 rzRoPfUFTWuNK_wWlCASPQ 1 1 292 0 290.8kb 290.8kb
green open .kibana-event-log-7.14.2-000001 dg4EalAiRbGMvPyos1ZHow 1 0 1 0 5.5kb 5.5kb
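The filebeat-* indices above are created by Filebeat tailing Envoy's access log and shipping it into Elasticsearch. A minimal sketch of such a filebeat.yml (the log path and JSON options are assumptions, not the lab's actual file):
filebeat.inputs:
- type: log
  paths:
    - /var/log/envoy/access.log        # assumed location of Envoy's JSON access log
  json.keys_under_root: true           # lift the JSON fields to the top level
output.elasticsearch:
  hosts: ["172.31.76.15:9200"]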
Kibana view (screenshot)
- monitoring-and-accesslog
cd servicemesh_in_practise/Monitoring-and-Tracing/monitoring-and-accesslog
# start
docker-compose up
# verify
while true; do curl 172.31.79.10/service/colors; sleep 0.$RANDOM; done
# check Elasticsearch
curl 172.31.79.15:9200
{
  "name" : "myes01",
  "cluster_name" : "myes",
  "cluster_uuid" : "SMKEiNPeQe2eTFExMT5p9A",
  "version" : {
    "number" : "7.14.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
    "build_date" : "2021-09-15T10:18:09.722761972Z",
    "build_snapshot" : false,
    "lucene_version" : "8.9.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
# list the ES indices
curl 172.31.79.15:9200/_cat/indices
green open .geoip_databases hrkkroDNRaKu2a0JGN-nSA 1 0 40 0 37.7mb 37.7mb
yellow open filebeat-7.14.2-20xx.xx.28-000001 VbsXUmPoTvuPagf9Etxtzg 1 1 0 0 208b 208b
green open .apm-custom-link pFrJAe0BRhm7DrCcodldVg 1 0 0 0 208b 208b
green open .apm-agent-configuration aHQxL3AmQFWqYmJeoi1fzQ 1 0 0 0 208b 208b
green open .kibana_task_manager_7.14.2_001 8Gq6PvpzQValMTAuHuDu8Q 1 0 14 87 235.3kb 235.3kb
green open .kibana_7.14.2_001 CW4oUQQESFmp3_Zn5QBQiQ 1 0 11 1 4.6mb 4.6mb
yellow open filebeat-2022.04.28 UDHC5YUjS_C-KyUAx9G2RA 1 1 275 0 432.6kb 432.6kb
green open .kibana-event-log-7.14.2-000001 _bGofajrQLCqVKMktz0zpg 1 0 2 0 10.9kb 10.9kb
green open .tasks BWvcKq2ESI2vUhjt02ItLw 1 0 2 0 7.7kb 7.7kb
Kibana view (screenshot)
Grafana view (screenshot)
- zipkin-tracing
cd servicemesh_in_practise/Monitoring-and-Tracing/zipkin-tracing
# start
docker-compose up
# verify
while true; do curl 172.31.79.10/service/colors; sleep 0.$RANDOM; done
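Tracing is enabled inside the HTTP connection manager, pointing at a Zipkin collector cluster. A minimal provider sketch (the cluster name is an assumption):
tracing:
  provider:
    name: envoy.tracers.zipkin
    typed_config:
      "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig
      collector_cluster: zipkin          # must match a configured cluster
      collector_endpoint: "/api/v2/spans"
      collector_endpoint_version: HTTP_JSON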
Zipkin view (screenshot)
三冷溶、監(jiān)控與跟蹤
- Custom log format
Modify the front-envoy.yaml file of access-log under Monitoring-and-Tracing to define a custom log format.
vim front-envoy.yaml # change text_format to the following
text_format: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - - [%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %BYTES_SENT% \"-\" \"%REQ(USER-AGENT)%\"\n"
# verify
172.31.73.1 - - [20xx-xx-28Txx:24:31.520Z] "GET / HTTP/1.1" 200 75 "-" "curl/7.68.0"
- SkyWalking monitoring
# the docker-compose file
cat docker-compose.yml
version: '3.3'
services:
  front-envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - "./front_envoy/envoy-config.yaml:/etc/envoy/envoy.yaml"
    networks:
      envoymesh:
        ipv4_address: 172.31.85.10
        aliases:
          - front-envoy
          - front
    ports:
      - 8080:80
      - 9901:9901
  service_a_envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - "./service_a/envoy-config.yaml:/etc/envoy/envoy.yaml"
    networks:
      envoymesh:
        aliases:
          - service_a_envoy
          - service-a-envoy
    ports:
      - 8786
      - 8788
      - 8791
  service_a:
    build: service_a/
    network_mode: "service:service_a_envoy"
    #ports:
    #- 8081
    depends_on:
      - service_a_envoy
  service_b_envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - "./service_b/envoy-config.yaml:/etc/envoy/envoy.yaml"
    networks:
      envoymesh:
        aliases:
          - service_b_envoy
          - service-b-envoy
    ports:
      - 8789
  service_b:
    build: service_b/
    network_mode: "service:service_b_envoy"
    #ports:
    #- 8082
    depends_on:
      - service_b_envoy
  service_c_envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - "./service_c/envoy-config.yaml:/etc/envoy/envoy.yaml"
    networks:
      envoymesh:
        aliases:
          - service_c_envoy
          - service-c-envoy
    ports:
      - 8790
  service_c:
    build: service_c/
    network_mode: "service:service_c_envoy"
    #ports:
    #- 8083
    depends_on:
      - service_c_envoy
  es7:
    image: elasticsearch:7.17.3
    container_name: es7
    ports:
      - 9200:9200
      - 9300:9300
    environment:
      - discovery.type=single-node # single-node mode
      - bootstrap.memory_lock=true # lock the JVM memory, disable swapping
      - "ES_JAVA_OPTS=-Xms1048m -Xmx1048m" # JVM heap size
      - TZ=Asia/Shanghai
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      envoymesh:
        ipv4_address: 172.31.85.15
  skywalking-oap:
    image: apache/skywalking-oap-server:8.6.0-es7
    container_name: skywalking-oap
    restart: always
    depends_on:
      - es7
    links:
      - es7
    ports:
      - 11800:11800
      - 12800:12800
    environment:
      TZ: Asia/Shanghai
      SW_STORAGE: elasticsearch7
      SW_STORAGE_ES_CLUSTER_NODES: es7:9200
    networks:
      envoymesh:
        ipv4_address: 172.31.85.16
  skywalking-ui:
    image: apache/skywalking-ui:8.6.0
    container_name: skywalking-ui
    restart: always
    depends_on:
      - skywalking-oap
    links:
      - skywalking-oap
    ports:
      - 8081:8080
    environment:
      TZ: Asia/Shanghai
      SW_OAP_ADDRESS: skywalking-oap:12800
    networks:
      envoymesh:
        ipv4_address: 172.31.85.17
networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.85.0/24
# the Envoy config file
cat envoy-config.yaml
node:
  id: front-envoy
  cluster: front-envoy
admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901
layered_runtime:
  layers:
    - name: admin
      admin_layer: {}
static_resources:
  listeners:
    - name: http_listener-service_a
      address:
        socket_address:
          address: 0.0.0.0
          port_value: 80
      traffic_direction: OUTBOUND
      filter_chains:
        - filters:
            - name: envoy.filters.network.http_connection_manager
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                generate_request_id: true
                tracing:
                  provider:
                    name: envoy.tracers.skywalking
                    typed_config:
                      "@type": type.googleapis.com/envoy.config.trace.v3.SkyWalkingConfig
                      grpc_service:
                        envoy_grpc:
                          cluster_name: skywalking
                        timeout: 0.250s
                      client_config:
                        service_name: front-envoy
                        instance_name: front-envoy-1
                codec_type: AUTO
                stat_prefix: ingress_http
                route_config:
                  name: local_route
                  virtual_hosts:
                    - name: backend
                      domains:
                        - "*"
                      routes:
                        - match:
                            prefix: "/"
                          route:
                            cluster: service_a
                          decorator:
                            operation: checkAvailability
                          response_headers_to_add:
                            - header:
                                key: "x-b3-traceid"
                                value: "%REQ(x-b3-traceid)%"
                            - header:
                                key: "x-request-id"
                                value: "%REQ(x-request-id)%"
                http_filters:
                  - name: envoy.filters.http.router
  clusters:
    - name: skywalking
      type: STRICT_DNS
      lb_policy: ROUND_ROBIN
      typed_extension_protocol_options:
        envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
          "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
          explicit_http_config:
            http2_protocol_options: {}
      load_assignment:
        cluster_name: skywalking
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: skywalking-oap
                      port_value: 11800
    - name: service_a
      connect_timeout: 0.25s
      type: strict_dns
      lb_policy: ROUND_ROBIN
      load_assignment:
        cluster_name: service_a
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: service_a_envoy
                      port_value: 8786
# start
docker-compose up
# verify
while true; do curl 172.31.85.10; sleep 0.$RANDOM; done
Reference config: https://github.com/envoyproxy/envoy/blob/main/examples/skywalking-tracing/front-envoy-skywalking.yaml
Note: make sure to pair matching SkyWalking and Elasticsearch versions. When trying apache/skywalking-oap-server:8.7.0-es7 with apache/skywalking-ui:8.7.0, the containers start normally, but visiting the UI produces log errors and the overview page stays blank. Also make sure the ports exposed by SkyWalking do not conflict with those exposed by front-envoy. With the versions in the YAML above, the startup logs show some errors, but they do not affect usage. Once everything is up, run the curl loop to generate logs and traces, set the matching time zone in the skywalking-ui, click the refresh button, and wait a moment for data to appear.
SkyWalking views (screenshots)