#apache2 #varnish #cache-control #varnish-vcl
Question:
I have apache2 with Varnish 2 running in front of it. My problem is that even though my cache-control is set to 1 year, Varnish still reports a load of expired objects (N expired objects is 1971). Here is my default.vcl showing my settings; any hints?
backend apache {
    .host = "127.0.0.1";
    .port = "8080";
}
sub vcl_recv {
    if (req.http.Accept-Encoding) {
        if (req.url ~ "\.(jpg|png|gif|gz|tgz|bz2|tbz|mp3|ogg)$") {
            # No point in compressing these
            remove req.http.Accept-Encoding;
        } elsif (req.http.Accept-Encoding ~ "gzip") {
            set req.http.Accept-Encoding = "gzip";
        } elsif (req.http.Accept-Encoding ~ "deflate") {
            set req.http.Accept-Encoding = "deflate";
        } else {
            # unknown algorithm
            remove req.http.Accept-Encoding;
        }
    }
    if (req.url ~ "\.(ico|png|gif|jpg|swf|css|js)$") {
        return(lookup);
    }
}
## Fetch
sub vcl_fetch {
    # strip the cookie before the image is inserted into cache.
    if (req.url ~ "\.(ico|png|gif|jpg|swf|css|js)$") {
        unset beresp.http.set-cookie;
    }
    ## Remove X-Forwarded-For header if it exists.
    remove req.http.X-Forwarded-For;
    ## Insert client IP address as X-Forwarded-For.
    set req.http.X-Forwarded-For = req.http.rlnclientipaddr;
    ## Deliver the content
    return(deliver);
}
## Deliver
sub vcl_deliver {
    ## Clean headers
    remove resp.http.X-Varnish;
    remove resp.http.Via;
    remove resp.http.Age;
    remove resp.http.X-Powered-By;
    if (obj.hits > 0) {
        set resp.http.X-Cache = "HIT";
    } else {
        set resp.http.X-Cache = "MISS";
    }
}
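Before changing anything, it can help to confirm what TTL Varnish actually assigns when it inserts an object. One way to do that, assuming the Varnish 2 tooling is installed and the TTL log tag is available in your build, is to filter the shared memory log:

# Watch the TTL records Varnish writes as it caches objects
varnishlog -i TTL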
Here is my varnishstat output:
client_conn 4165 0.26 Client connections accepted
client_drop 1 0.00 Connection dropped, no sess/wrk
client_req 5036 0.31 Client requests received
cache_hit 1235 0.08 Cache hits
cache_hitpass 0 0.00 Cache hits for pass
cache_miss 2329 0.14 Cache misses
backend_conn 1359 0.08 Backend conn. success
backend_unhealthy 0 0.00 Backend conn. not attempted
backend_busy 0 0.00 Backend conn. too many
backend_fail 0 0.00 Backend conn. failures
backend_reuse 2442 0.15 Backend conn. reuses
backend_toolate 1223 0.08 Backend conn. was closed
backend_recycle 3666 0.23 Backend conn. recycles
backend_unused 0 0.00 Backend conn. unused
fetch_head 0 0.00 Fetch head
fetch_length 3665 0.23 Fetch with Length
fetch_chunked 51 0.00 Fetch chunked
fetch_eof 0 0.00 Fetch EOF
fetch_bad 0 0.00 Fetch had bad headers
fetch_close 67 0.00 Fetch wanted close
fetch_oldhttp 0 0.00 Fetch pre HTTP/1.1 closed
fetch_zero 15 0.00 Fetch zero len
fetch_failed 0 0.00 Fetch failed
n_sess_mem 30 . N struct sess_mem
n_sess 0 . N struct sess
n_object 314 . N struct object
n_vampireobject 0 . N unresurrected objects
n_objectcore 318 . N struct objectcore
n_objecthead 361 . N struct objecthead
n_smf 0 . N struct smf
n_smf_frag 0 . N small free smf
n_smf_large 0 . N large free smf
n_vbe_conn 1 . N struct vbe_conn
n_wrk 10 . N worker threads
n_wrk_create 10 0.00 N worker threads created
n_wrk_failed 0 0.00 N worker threads not created
n_wrk_max 567851 35.17 N worker threads limited
n_wrk_queue 0 0.00 N queued work requests
n_wrk_overflow 37 0.00 N overflowed work requests
n_wrk_drop 4 0.00 N dropped work requests
n_backend 1 . N backends
n_expired 1971 . N expired objects
n_lru_nuked 0 . N LRU nuked objects
n_lru_saved 0 . N LRU saved objects
n_lru_moved 1211 . N LRU moved objects
n_deathrow 0 . N objects on deathrow
losthdr 0 0.00 HTTP header overflows
n_objsendfile 0 0.00 Objects sent with sendfile
n_objwrite 4810 0.30 Objects sent with write
n_objoverflow 0 0.00 Objects overflowing workspace
s_sess 4164 0.26 Total Sessions
s_req 5036 0.31 Total Requests
s_pipe 0 0.00 Total pipe
s_pass 1472 0.09 Total pass
s_fetch 3798 0.24 Total fetch
s_hdrbytes 1925358 119.25 Total header bytes
s_bodybytes 111698308 6918.45 Total body bytes
sess_closed 2172 0.13 Session Closed
sess_pipeline 0 0.00 Session Pipeline
sess_readahead 0 0.00 Session Read Ahead
sess_linger 3290 0.20 Session Linger
sess_herd 3093 0.19 Session herd
shm_records 343107 21.25 SHM records
shm_writes 40917 2.53 SHM writes
shm_flushes 0 0.00 SHM flushes due to overflow
shm_cont 3 0.00 SHM MTX contention
shm_cycles 0 0.00 SHM cycles through buffer
sm_nreq 0 0.00 allocator requests
sm_nobj 0 . outstanding allocations
sm_balloc 0 . bytes allocated
sm_bfree 0 . bytes free
sma_nreq 6015 0.37 SMA allocator requests
sma_nobj 627 . SMA outstanding allocations
sma_nbytes 4035862 . SMA outstanding bytes
sma_balloc 34755299 . SMA bytes allocated
sma_bfree 30719437 . SMA bytes free
sms_nreq 3 0.00 SMS allocator requests
sms_nobj 0 . SMS outstanding allocations
sms_nbytes 0 . SMS outstanding bytes
sms_balloc 1464 . SMS bytes allocated
sms_bfree 1464 . SMS bytes freed
backend_req 3801 0.24 Backend requests made
n_vcl 1 0.00 N vcl total
n_vcl_avail 1 0.00 N vcl available
n_vcl_discard 0 0.00 N vcl discarded
n_purge 1 . N total active purges
n_purge_add 1 0.00 N new purges added
n_purge_retire 0 0.00 N old purges deleted
n_purge_obj_test 0 0.00 N objects tested
n_purge_re_test 0 0.00 N regexps tested against
n_purge_dups 0 0.00 N duplicate purges removed
hcb_nolock 1245 0.08 HCB Lookups without lock
hcb_lock 0 0.00 HCB Lookups with lock
hcb_insert 2319 0.14 HCB Inserts
esi_parse 0 0.00 Objects ESI parsed (unlock)
esi_errors 0 0.00 ESI parse errors (unlock)
accept_fail 0 0.00 Accept failures
client_drop_late 3 0.00 Connection dropped late
uptime 16145 1.00 Client uptime
If it helps, here is how I set cache-control in my apache2 conf:
<FilesMatch "\.(ico|pdf|gif|jpg|jpeg|png|swf|js|css)$">
    ExpiresActive On
    ExpiresDefault A31556926
    Header unset ETag
    FileETag None
</FilesMatch>
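To double-check what Apache actually emits, a quick header check against the backend port works; the path below is just a placeholder for one of your static files:

# Inspect the Expires and Cache-Control headers on a static asset
curl -I http://localhost:8080/images/logo.png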
Can anyone see why my count of N expired objects is so high?
Answer #1:
I fixed this completely by adding:
set beresp.ttl = 31556926s;
I added it immediately after
sub vcl_fetch {
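For context, the top of vcl_fetch then looks roughly like this (a sketch built from the config above; the X-Forwarded-For lines are left out for brevity, and 31556926s matches the one-year ExpiresDefault):

sub vcl_fetch {
    # Force a one-year TTL so cached objects stay fresh in Varnish
    # regardless of what TTL the backend headers work out to.
    set beresp.ttl = 31556926s;
    # strip the cookie before the image is inserted into cache.
    if (req.url ~ "\.(ico|png|gif|jpg|swf|css|js)$") {
        unset beresp.http.set-cookie;
    }
    return(deliver);
}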
Now I have zero expired objects and my hit rate has doubled!
My last unresolved problem is the large number of…
N LRU moved objects
but since I have no LRU nuked entries in varnishstat, everything seems to be working as intended.
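If you want to keep an eye on those counters, a one-off snapshot is enough (varnishstat -1 prints every counter once; the counter names here are the Varnish 2 ones):

# Snapshot just the LRU and expiry counters
varnishstat -1 | egrep 'n_lru|n_expired'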
Comments:
1. LRU moved only indicates how many times objects have been moved around on the LRU list internally. Unless you have lock contention on the LRU list, I wouldn't worry about it.