debug.txt (secret gist), created June 22, 2015 23:32
---- /etc/ceph/ceph.conf -----
[global]
keyring = /etc/ceph/keyring
fsid = 31a33045-2343-2097-8585-19c9fa234574
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
# ------------------------------------------------------------------------------
# Monitors
# ------------------------------------------------------------------------------
[mon]
debug mon = 20
debug paxos = 20
debug auth = 20
[mon.1]
host = ceph-01
mon addr = 10.1.1.6:6789
mon data = /var/lib/ceph/mon/ceph-$id
[mon.2]
host = ceph-02
mon addr = 10.1.1.7:6789
mon data = /var/lib/ceph/mon/ceph-$id
[mon.3]
host = ceph-03
mon addr = 10.1.1.8:6789
mon data = /var/lib/ceph/mon/ceph-$id
# ------------------------------------------------------------------------------
# OSD Section
# ------------------------------------------------------------------------------
[osd]
osd data = /var/lib/ceph/osd/ceph-$id
osd journal = /var/lib/ceph/osd/ceph-$id/journal
osd journal size = 512
osd mkfs type = xfs
[osd.1]
host = ceph-01
cluster addr = 10.4.1.146:6800
public addr = 10.1.1.6:6801
devs = /dev/sda2
[osd.2]
host = ceph-02
cluster addr = 10.4.1.147:6800
public addr = 10.1.1.7:6802
devs = /dev/sda2
[osd.3]
host = ceph-03
cluster addr = 10.4.1.148:6800
public addr = 10.1.1.8:6803
devs = /dev/sda2
# ------------------------------------------------------------------------------
# MDS Servers
# ------------------------------------------------------------------------------
[mds]
mds data = /var/lib/ceph/mds/ceph-$id
[mds.1]
host = ceph-01
mds addr = 10.1.1.6:6789
[mds.2]
host = ceph-02
mds addr = 10.1.1.7:6789
[mds.3]
host = ceph-03
mds addr = 10.1.1.8:6789
---- End /etc/ceph/ceph.conf -----
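A quick way to sanity-check how each daemon resolves its options from this file is ceph-conf, which ships with Ceph (a sketch; the expected values follow from the config above):

ceph-conf --name mon.1 -c /etc/ceph/ceph.conf 'mon data'     # expect /var/lib/ceph/mon/ceph-1
ceph-conf --name osd.1 -c /etc/ceph/ceph.conf 'osd journal'  # expect /var/lib/ceph/osd/ceph-1/journal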
--- Ansible output for configuration -----
<10.1.1.6> EXEC ['sshpass', '-d13', 'ssh', '-C', '-vvv', '-o', 'ControlMaster=auto', '-o', 'ControlPersist=60s', '-o', 'ControlPath=/home/joe/.ansible/cp/ansible-ssh-%h-%p-%r', '-o', 'Port=22', '-o', 'GSSAPIAuthentication=no', '-o', 'PubkeyAuthentication=no', '-o', 'User=zenny', '-o', 'ConnectTimeout=10', '10.1.1.6', u'/bin/sh -c \'sudo -k && sudo -H -S -p "[sudo via ansible, key=yrougenvtmfxttanammmbgtrcfqqmega] password: " -u root /bin/sh -c \'"\'"\'echo SUDO-SUCCESS-yrougenvtmfxttanammmbgtrcfqqmega; LANG=C LC_CTYPE=C /usr/bin/python\'"\'"\'\'']
<10.1.1.8> EXEC ['sshpass', '-d15', 'ssh', '-C', '-vvv', '-o', 'ControlMaster=auto', '-o', 'ControlPersist=60s', '-o', 'ControlPath=/home/joe/.ansible/cp/ansible-ssh-%h-%p-%r', '-o', 'Port=22', '-o', 'GSSAPIAuthentication=no', '-o', 'PubkeyAuthentication=no', '-o', 'User=zenny', '-o', 'ConnectTimeout=10', '10.1.1.8', u'/bin/sh -c \'sudo -k && sudo -H -S -p "[sudo via ansible, key=nsyfzopnsxntomyhecnvjcouwssqokac] password: " -u root /bin/sh -c \'"\'"\'echo SUDO-SUCCESS-nsyfzopnsxntomyhecnvjcouwssqokac; LANG=C LC_CTYPE=C /usr/bin/python\'"\'"\'\'']
<10.1.1.7> EXEC ['sshpass', '-d14', 'ssh', '-C', '-vvv', '-o', 'ControlMaster=auto', '-o', 'ControlPersist=60s', '-o', 'ControlPath=/home/joe/.ansible/cp/ansible-ssh-%h-%p-%r', '-o', 'Port=22', '-o', 'GSSAPIAuthentication=no', '-o', 'PubkeyAuthentication=no', '-o', 'User=zenny', '-o', 'ConnectTimeout=10', '10.1.1.7', u'/bin/sh -c \'sudo -k && sudo -H -S -p "[sudo via ansible, key=qigivkpecyvdyjuhrimhzbfqcfczdomr] password: " -u root /bin/sh -c \'"\'"\'echo SUDO-SUCCESS-qigivkpecyvdyjuhrimhzbfqcfczdomr; LANG=C LC_CTYPE=C /usr/bin/python\'"\'"\'\'']
failed: [ceph-02] => {"failed": true}
msg: Job for ceph.service failed. See 'systemctl status ceph.service' and 'journalctl -xn' for details.
failed: [ceph-03] => {"failed": true}
msg: Job for ceph.service failed. See 'systemctl status ceph.service' and 'journalctl -xn' for details.
failed: [ceph-01] => {"failed": true}
msg: Job for ceph.service failed. See 'systemctl status ceph.service' and 'journalctl -xn' for details.
FATAL: all hosts have already failed -- aborting
PLAY RECAP ********************************************************************
to retry, use: --limit @/home/joe/ceph.retry
ceph-01 : ok=25 changed=8 unreachable=0 failed=1
ceph-02 : ok=25 changed=8 unreachable=0 failed=1
ceph-03 : ok=25 changed=8 unreachable=0 failed=1
Makefile:9: recipe for target 'ceph' failed
make: *** [ceph] Error 2
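The failed hosts can be re-run on their own using the retry file Ansible wrote (a sketch; the playbook name here is an assumption, since the run is driven by the Makefile's 'ceph' target):

ansible-playbook site.yml --limit @/home/joe/ceph.retry   # site.yml stands in for whatever playbook the Makefile invokes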
--------------------------------------------------------------
[root@ceph-01:/var/log/ceph]: systemctl status -l ceph.service
● ceph.service - LSB: Start Ceph distributed file system daemons at boot time
Loaded: loaded (/etc/init.d/ceph)
Active: failed (Result: exit-code) since Mon 2015-06-22 13:23:39 CDT; 1min 16s ago
Process: 463 ExecStop=/etc/init.d/ceph stop (code=exited, status=0/SUCCESS)
Process: 3692 ExecStart=/etc/init.d/ceph start (code=exited, status=1/FAILURE)
Jun 22 13:23:21 ceph-01 ceph[3692]: 2015-06-22 13:23:21.657167 7f91c4624700 0 -- :/1004027 >> 10.1.1.7:6789/0 pipe(0x7f91b40022a0 sd=3 :0 s=1 pgs=0 cs=0 l=1 c=0x7f91b4002530).fault
Jun 22 13:23:24 ceph-01 ceph[3692]: 2015-06-22 13:23:24.657348 7f91c4523700 0 -- :/1004027 >> 10.1.1.8:6789/0 pipe(0x7f91b40038e0 sd=3 :0 s=1 pgs=0 cs=0 l=1 c=0x7f91b4002dc0).fault
Jun 22 13:23:27 ceph-01 ceph[3692]: 2015-06-22 13:23:27.657163 7f91c4624700 0 -- :/1004027 >> 10.1.1.6:6789/0 pipe(0x7f91b4002370 sd=4 :0 s=1 pgs=0 cs=0 l=1 c=0x7f91b40052e0).fault
Jun 22 13:23:30 ceph-01 ceph[3692]: 2015-06-22 13:23:30.657753 7f91c4523700 0 -- :/1004027 >> 10.1.1.8:6789/0 pipe(0x7f91b4005b20 sd=3 :0 s=1 pgs=0 cs=0 l=1 c=0x7f91b4005db0).fault
Jun 22 13:23:33 ceph-01 ceph[3692]: 2015-06-22 13:23:33.657763 7f91c4624700 0 -- :/1004027 >> 10.1.1.6:6789/0 pipe(0x7f91b4004010 sd=4 :0 s=1 pgs=0 cs=0 l=1 c=0x7f91b40042a0).fault
Jun 22 13:23:36 ceph-01 ceph[3692]: 2015-06-22 13:23:36.658343 7f91c4523700 0 -- :/1004027 >> 10.1.1.7:6789/0 pipe(0x7f91b4004a40 sd=4 :0 s=1 pgs=0 cs=0 l=1 c=0x7f91b4004cd0).fault
Jun 22 13:23:39 ceph-01 ceph[3692]: failed: 'timeout 30 /usr/bin/ceph -c /etc/ceph/ceph.conf --name=osd.1 --keyring=/etc/ceph/keyring osd crush create-or-move -- 1 0.02 host=ceph-01 root=default'
Jun 22 13:23:39 ceph-01 systemd[1]: ceph.service: control process exited, code=exited status=1
Jun 22 13:23:39 ceph-01 systemd[1]: Failed to start LSB: Start Ceph distributed file system daemons at boot time.
Jun 22 13:23:39 ceph-01 systemd[1]: Unit ceph.service entered failed state.
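Every pipe(...).fault line above is a failed client connection to a monitor at 10.1.1.x:6789, so the first thing to check on each node is whether a ceph-mon process is running at all and listening on that port (a sketch):

ss -tlnp | grep 6789         # expect ceph-mon bound to the node's 10.1.1.x address
ps -C ceph-mon -o pid,args   # expect one running ceph-mon per node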
---------------------------- Logs ---------------------------------------
[root@ceph-01:/var/log/ceph]: cat ceph-mon.1.log
2015-06-22 12:27:14.883126 7f9c1214d7c0 0 ceph version 0.80.7 (6c0127fcb58008793d3c8b62d925bc91963672a3), process ceph-mon, pid 23838
2015-06-22 12:27:14.887237 7f9c1214d7c0 10 load: jerasure
2015-06-22 12:27:14.893749 7f9c1214d7c0 10 check if store needs conversion from legacy format
2015-06-22 12:27:14.893761 7f9c1214d7c0 1 store(/var/lib/ceph/mon/ceph-1) mount
2015-06-22 12:27:14.893787 7f9c1214d7c0 15 store(/var/lib/ceph/mon/ceph-1) exists_bl magic
2015-06-22 12:27:14.893825 7f9c1214d7c0 -1 unable to read magic from mon data.. did you run mkcephfs?
2015-06-22 13:12:55.010472 7f52ad5317c0 0 ceph version 0.80.7 (6c0127fcb58008793d3c8b62d925bc91963672a3), process ceph-mon, pid 1195
2015-06-22 13:12:55.014721 7f52ad5317c0 10 load: jerasure
2015-06-22 13:12:55.023268 7f52ad5317c0 10 check if store needs conversion from legacy format
2015-06-22 13:12:55.023279 7f52ad5317c0 1 store(/var/lib/ceph/mon/ceph-1) mount
2015-06-22 13:12:55.023301 7f52ad5317c0 15 store(/var/lib/ceph/mon/ceph-1) exists_bl magic
2015-06-22 13:12:55.023340 7f52ad5317c0 -1 unable to read magic from mon data.. did you run mkcephfs?
2015-06-22 13:23:09.361742 7fd3f97a97c0 0 ceph version 0.80.7 (6c0127fcb58008793d3c8b62d925bc91963672a3), process ceph-mon, pid 3784
2015-06-22 13:23:09.365886 7fd3f97a97c0 10 load: jerasure
2015-06-22 13:23:09.379478 7fd3f97a97c0 10 check if store needs conversion from legacy format
2015-06-22 13:23:09.379493 7fd3f97a97c0 1 store(/var/lib/ceph/mon/ceph-1) mount
2015-06-22 13:23:09.379518 7fd3f97a97c0 15 store(/var/lib/ceph/mon/ceph-1) exists_bl magic
2015-06-22 13:23:09.379548 7fd3f97a97c0 -1 unable to read magic from mon data.. did you run mkcephfs?
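The "unable to read magic from mon data" error means /var/lib/ceph/mon/ceph-1 was never initialized as a monitor store, so ceph-mon exits at startup, nothing ever listens on 6789, and all of the faults above follow from that. On Firefly (0.80.x) the store is created manually, roughly as below (a sketch; /tmp/monmap is a scratch path, it assumes /etc/ceph/keyring holds the mon. key, and the fsid must match ceph.conf):

monmaptool --create --fsid 31a33045-2343-2097-8585-19c9fa234574 \
    --add 1 10.1.1.6:6789 --add 2 10.1.1.7:6789 --add 3 10.1.1.8:6789 /tmp/monmap
ceph-mon --mkfs -i 1 --monmap /tmp/monmap --keyring /etc/ceph/keyring
# repeat ceph-mon --mkfs on each node with its own id (-i 2 on ceph-02, -i 3 on ceph-03)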
[root@ceph-01:/var/log/ceph]: tail -6 ceph-mds.1.log
2015-06-22 13:25:39.489096 7fd183af8700 0 -- 0.0.0.0:6800/3890 >> 10.1.1.7:6789/0 pipe(0x7fd186ab8280 sd=9 :0 s=1 pgs=0 cs=0 l=1 c=0x7fd186a59760).fault
2015-06-22 13:25:42.489429 7fd18317d700 0 -- 0.0.0.0:6800/3890 >> 10.1.1.8:6789/0 pipe(0x7fd186ab8a00 sd=8 :0 s=1 pgs=0 cs=0 l=1 c=0x7fd186a59a20).fault
2015-06-22 13:25:45.489183 7fd183af8700 0 -- 0.0.0.0:6800/3890 >> 10.1.1.6:6789/0 pipe(0x7fd186ab8f00 sd=8 :0 s=1 pgs=0 cs=0 l=1 c=0x7fd186a59ce0).fault
2015-06-22 13:25:48.489985 7fd18317d700 0 -- 0.0.0.0:6800/3890 >> 10.1.1.7:6789/0 pipe(0x7fd186ab9400 sd=8 :0 s=1 pgs=0 cs=0 l=1 c=0x7fd186a59fa0).fault
2015-06-22 13:25:51.489706 7fd183af8700 0 -- 0.0.0.0:6800/3890 >> 10.1.1.6:6789/0 pipe(0x7fd186ab9900 sd=8 :0 s=1 pgs=0 cs=0 l=1 c=0x7fd186a5a260).fault
2015-06-22 13:25:54.490407 7fd18317d700 0 -- 0.0.0.0:6800/3890 >> 10.1.1.7:6789/0 pipe(0x7fd186ab9e00 sd=8 :0 s=1 pgs=0 cs=0 l=1 c=0x7fd186a59fa0).fault