Hi all
I am not sure whether this topic belongs to CTA or EOS, so I am posting it here.
In our eoscta instance it looks like the MGM tape garbage collector (TGC) does not work. I do not see any activity in the logs and there are no changes in the space. I think I enabled it correctly using the commands provided in the eoscta docs [1]. Maybe some defaults are missing on a fresh setup, or some required feature is missing.
Is such a configuration still supported, or do we have to switch to the cta-fst-gcd python script? Are there any hints on what could be checked and where?
Below is my eoscta setup for tgc [1] and the full space config [2], with the eos versions [3]. We use the NS in QuarkDB.
Many thx in advance.
Cheers
Victor
[1] tgc setup
[root@tape-1-3-3 /]# eos space ls default -m | tr ' ' '\n' | grep statfs
sum.stat.statfs.usedbytes=82912054607872
sum.stat.statfs.freebytes=9975213588480
sum.stat.statfs.freebytes?configstatus@rw=9975213588480
sum.stat.statfs.capacity=92887268196352
sum.stat.statfs.ffiles=0
sum.stat.statfs.files=19482918665
sum.stat.statfs.capacity?configstatus@rw=92887268196352
[root@tape-1-3-3 /]# eos space status default | grep tgc
tgc.availbytes := 20000000000000
tgc.qryperiodsecs := 300
tgc.totalbytes := 40000000000000
[root@tape-1-3-3 /]# eos ns stat -m |grep tgc
uid=all gid=all tgc.is_active=false
[root@tape-1-3-3 /]# grep tgc /etc/xrd.cf.mgm
mgmofs.tgc.enablespace default
[2] space default
[root@tape-1-3-3 /]# eos space status default
# ------------------------------------------------------------------------------------
# Space Variables
# ....................................................................................
balancer := off
balancer.node.ntx := 2
balancer.node.rate := 25
balancer.threshold := 20
converter := off
converter.ntx := 2
drainer.node.nfs := 5
drainer.node.ntx := 2
drainer.node.rate := 25
drainperiod := 86400
filearchivedgc := off
fsck_refresh_interval := 7200
geobalancer := off
geobalancer.ntx := 10
geobalancer.threshold := 5
graceperiod := 86400
groupbalancer := off
groupbalancer.engine := std
groupbalancer.file_attempts := 50
groupbalancer.max_file_size := 16G
groupbalancer.max_threshold := 0
groupbalancer.min_file_size := 1G
groupbalancer.min_threshold := 0
groupbalancer.ntx := 10
groupbalancer.threshold := 5
groupmod := 24
groupsize := 2
lru := off
quota := off
scan_disk_interval := 14400
scan_ns_interval := 259200
scan_ns_rate := 50
scaninterval := 604800
scanrate := 100
taperestapi := off
tgc.availbytes := 20000000000000
tgc.qryperiodsecs := 300
tgc.totalbytes := 40000000000000
tracker := off
wfe := on
wfe.interval := 10
wfe.ntx := 500
[3] eos version
[root@tape-1-3-3 /]# rpm -qa|grep eos
eos-folly-2019.11.11.00-1.el7.cern.x86_64
libmicrohttpd-0.9.38-eos.yves.el7.cern.x86_64
eos-client-4.8.79-1.el7.cern.x86_64
eos-fuse-sysv-4.8.79-1.el7.cern.x86_64
eos-server-4.8.79-1.el7.cern.x86_64
eos-protobuf3-3.5.1-5.el7.cern.eos.x86_64
eos-folly-deps-2019.11.11.00-1.el7.cern.x86_64
eos-fuse-core-4.8.79-1.el7.cern.x86_64
eos-fuse-4.8.79-1.el7.cern.x86_64
eos-testkeytab-4.8.79-1.el7.cern.x86_64
eos-nginx-1.9.9-5.x86_64
eos-xrootd-4.12.8-1.el7.cern.x86_64