Commit 01777b31 authored by Dietmar Maurer's avatar Dietmar Maurer

ceph: add API and buttons for osd in/out

And move OSD API into separate package PVE::API2::CephOSD
parent 33c1150f
package PVE::API2::Ceph; package PVE::API2::CephOSD;
use strict; use strict;
use warnings; use warnings;
use File::Basename;
use File::Path;
use POSIX qw (LONG_MAX);
use Cwd qw(abs_path); use Cwd qw(abs_path);
use IO::Dir;
use UUID;
use Net::IP;
use PVE::SafeSyslog; use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach); use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach);
...@@ -20,7 +14,6 @@ use PVE::Storage; ...@@ -20,7 +14,6 @@ use PVE::Storage;
use PVE::RESTHandler; use PVE::RESTHandler;
use PVE::RPCEnvironment; use PVE::RPCEnvironment;
use PVE::JSONSchema qw(get_standard_option); use PVE::JSONSchema qw(get_standard_option);
use JSON;
use PVE::RADOS; use PVE::RADOS;
use PVE::CephTools; use PVE::CephTools;
...@@ -28,524 +21,780 @@ use base qw(PVE::RESTHandler); ...@@ -28,524 +21,780 @@ use base qw(PVE::RESTHandler);
use Data::Dumper; # fixme: remove use Data::Dumper; # fixme: remove
my $get_osd_status = sub {
my ($rados, $osdid) = @_;
# we can use longer rados timeout when inside workers my $stat = $rados->mon_command({ prefix => 'osd dump' });
my $long_rados_timeout = 60;
my $pve_osd_default_journal_size = 1024*5; my $osdlist = $stat->{osds} || [];
my $osdstat;
foreach my $d (@$osdlist) {
$osdstat->{$d->{osd}} = $d if defined($d->{osd});
}
if (defined($osdid)) {
die "no such OSD '$osdid'\n" if !$osdstat->{$osdid};
return $osdstat->{$osdid};
}
sub list_disks { return $osdstat;
my $disklist = {}; };
my $fd = IO::File->new("/proc/mounts", "r") ||
die "unable to open /proc/mounts - $!\n";
my $mounted = {}; __PACKAGE__->register_method ({
name => 'index',
path => '',
method => 'GET',
description => "Get Ceph osd list/tree.",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
},
},
# fixme: return a list instead of extjs tree format ?
returns => {
type => "object",
},
code => sub {
my ($param) = @_;
while (defined(my $line = <$fd>)) { PVE::CephTools::check_ceph_inited();
my ($dev, $path, $fstype) = split(/\s+/, $line);
next if !($dev && $path && $fstype);
next if $dev !~ m|^/dev/|;
my $real_dev = abs_path($dev);
$mounted->{$real_dev} = $path;
}
close($fd);
my $dev_is_mounted = sub { my $rados = PVE::RADOS->new();
my ($dev) = @_; my $res = $rados->mon_command({ prefix => 'osd tree' });
return $mounted->{$dev};
};
my $dir_is_epmty = sub { die "no tree nodes found\n" if !($res && $res->{nodes});
my ($dir) = @_;
my $dh = IO::Dir->new ($dir); my $osdhash = &$get_osd_status($rados);
return 1 if !$dh;
while (defined(my $tmp = $dh->read)) {
next if $tmp eq '.' || $tmp eq '..';
$dh->close;
return 0;
}
$dh->close;
return 1;
};
my $journal_uuid = '45b0969e-9b03-4f30-b4c6-b4b80ceff106';
my $journalhash = {};
dir_glob_foreach('/dev/disk/by-parttypeuuid', "$journal_uuid\..+", sub {
my ($entry) = @_;
my $real_dev = abs_path("/dev/disk/by-parttypeuuid/$entry");
$journalhash->{$real_dev} = 1;
});
dir_glob_foreach('/sys/block', '.*', sub {
my ($dev) = @_;
return if $dev eq '.';
return if $dev eq '..';
return if $dev =~ m|^ram\d+$|; # skip ram devices
return if $dev =~ m|^loop\d+$|; # skip loop devices
return if $dev =~ m|^md\d+$|; # skip md devices
return if $dev =~ m|^dm-.*$|; # skip dm related things
return if $dev =~ m|^fd\d+$|; # skip Floppy
return if $dev =~ m|^sr\d+$|; # skip CDs
my $devdir = "/sys/block/$dev/device";
return if ! -d $devdir;
my $size = file_read_firstline("/sys/block/$dev/size");
return if !$size;
$size = $size * 512; my $nodes = {};
my $newnodes = {};
foreach my $e (@{$res->{nodes}}) {
$nodes->{$e->{id}} = $e;
my $new = {
id => $e->{id},
name => $e->{name},
type => $e->{type}
};
my $info = `udevadm info --path /sys/block/$dev --query all`; foreach my $opt (qw(status crush_weight reweight)) {
return if !$info; $new->{$opt} = $e->{$opt} if defined($e->{$opt});
}
return if $info !~ m/^E: DEVTYPE=disk$/m; if (my $stat = $osdhash->{$e->{id}}) {
return if $info =~ m/^E: ID_CDROM/m; $new->{in} = $stat->{in} if defined($stat->{in});
}
my $serial = 'unknown'; $newnodes->{$e->{id}} = $new;
if ($info =~ m/^E: ID_SERIAL_SHORT=(\S+)$/m) {
$serial = $1;
} }
my $gpt = 0; foreach my $e (@{$res->{nodes}}) {
if ($info =~ m/^E: ID_PART_TABLE_TYPE=gpt$/m) { my $new = $newnodes->{$e->{id}};
$gpt = 1; if ($e->{children} && scalar(@{$e->{children}})) {
$new->{children} = [];
$new->{leaf} = 0;
foreach my $cid (@{$e->{children}}) {
$nodes->{$cid}->{parent} = $e->{id};
if ($nodes->{$cid}->{type} eq 'osd' &&
$e->{type} eq 'host') {
$newnodes->{$cid}->{host} = $e->{name};
}
push @{$new->{children}}, $newnodes->{$cid};
}
} else {
$new->{leaf} = ($e->{id} >= 0) ? 1 : 0;
}
} }
# detect SSD (fixme - currently only works for ATA disks) my $rootnode;
my $rpm = 7200; # default guess foreach my $e (@{$res->{nodes}}) {
if ($info =~ m/^E: ID_ATA_ROTATION_RATE_RPM=(\d+)$/m) { if (!$nodes->{$e->{id}}->{parent}) {
$rpm = $1; $rootnode = $newnodes->{$e->{id}};
last;
}
} }
my $vendor = file_read_firstline("$devdir/vendor") || 'unknown'; die "no root node\n" if !$rootnode;
my $model = file_read_firstline("$devdir/model") || 'unknown';
my $used; my $data = { root => $rootnode };
$used = 'LVM' if !&$dir_is_epmty("/sys/block/$dev/holders"); return $data;
}});
$used = 'mounted' if &$dev_is_mounted("/dev/$dev"); __PACKAGE__->register_method ({
name => 'createosd',
path => '',
method => 'POST',
description => "Create OSD",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
dev => {
description => "Block device name.",
type => 'string',
},
journal_dev => {
description => "Block device name for journal.",
optional => 1,
type => 'string',
},
fstype => {
description => "File system type.",
type => 'string',
enum => ['xfs', 'ext4', 'btrfs'],
default => 'xfs',
optional => 1,
},
},
},
returns => { type => 'string' },
code => sub {
my ($param) = @_;
$disklist->{$dev} = { my $rpcenv = PVE::RPCEnvironment::get();
vendor => $vendor,
model => $model,
size => $size,
serial => $serial,
gpt => $gpt,
rmp => $rpm,
};
my $osdid = -1; my $authuser = $rpcenv->get_user();
my $journal_count = 0; PVE::CephTools::check_ceph_inited();
my $found_partitions; PVE::CephTools::setup_pve_symlinks();
my $found_lvm;
my $found_mountpoints;
dir_glob_foreach("/sys/block/$dev", "$dev.+", sub {
my ($part) = @_;
$found_partitions = 1; my $journal_dev;
if (my $mp = &$dev_is_mounted("/dev/$part")) { if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
$found_mountpoints = 1; $journal_dev = PVE::CephTools::verify_blockdev_path($param->{journal_dev});
if ($mp =~ m|^/var/lib/ceph/osd/ceph-(\d+)$|) { }
$osdid = $1;
}
}
if (!&$dir_is_epmty("/sys/block/$dev/$part/holders")) {
$found_lvm = 1;
}
$journal_count++ if $journalhash->{"/dev/$part"};
});
$used = 'mounted' if $found_mountpoints && !$used; $param->{dev} = PVE::CephTools::verify_blockdev_path($param->{dev});
$used = 'LVM' if $found_lvm && !$used;
$used = 'partitions' if $found_partitions && !$used;
$disklist->{$dev}->{used} = $used if $used; my $disklist = PVE::CephTools::list_disks();
$disklist->{$dev}->{osdid} = $osdid;
$disklist->{$dev}->{journals} = $journal_count;
});
return $disklist; my $devname = $param->{dev};
} $devname =~ s|/dev/||;
my $diskinfo = $disklist->{$devname};
die "unable to get device info for '$devname'\n"
if !$diskinfo;
my $lookup_diskinfo = sub { die "device '$param->{dev}' is in use\n"
my ($disklist, $disk) = @_; if $diskinfo->{used};
my $real_dev = abs_path($disk); my $rados = PVE::RADOS->new();
$real_dev =~ s|/dev/||; my $monstat = $rados->mon_command({ prefix => 'mon_status' });
my $diskinfo = $disklist->{$real_dev}; die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
die "disk '$disk' not found in disk list\n" if !$diskinfo;
return wantarray ? ($diskinfo, $real_dev) : $diskinfo; my $fsid = $monstat->{monmap}->{fsid};
}; $fsid = $1 if $fsid =~ m/^([0-9a-f\-]+)$/;
my $ceph_bootstrap_osd_keyring = PVE::CephTools::get_config('ceph_bootstrap_osd_keyring');
my $count_journal_disks = sub {
my ($disklist, $disk) = @_;
my $count = 0; if (! -f $ceph_bootstrap_osd_keyring) {
my $bindata = $rados->mon_command({ prefix => 'auth get client.bootstrap-osd', format => 'plain' });
PVE::Tools::file_set_contents($ceph_bootstrap_osd_keyring, $bindata);
};
my $worker = sub {
my $upid = shift;
my ($diskinfo, $real_dev) = &$lookup_diskinfo($disklist, $disk); my $fstype = $param->{fstype} || 'xfs';
die "journal disk '$disk' does not contain a GUID partition table\n"
if !$diskinfo->{gpt};
$count = $diskinfo->{journals} if $diskinfo->{journals}; print "create OSD on $param->{dev} ($fstype)\n";
return $count; my $ccname = PVE::CephTools::get_config('ccname');
};
__PACKAGE__->register_method ({ my $cmd = ['ceph-disk', 'prepare', '--zap-disk', '--fs-type', $fstype,
name => 'index', '--cluster', $ccname, '--cluster-uuid', $fsid ];
path => '',
method => 'GET',
description => "Directory index.",
permissions => { user => 'all' },
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
},
},
returns => {
type => 'array',
items => {
type => "object",
properties => {},
},
links => [ { rel => 'child', href => "{name}" } ],
},
code => sub {
my ($param) = @_;
my $result = [ if ($journal_dev) {
{ name => 'init' }, print "using device '$journal_dev' for journal\n";
{ name => 'mon' }, push @$cmd, '--journal-dev', $param->{dev}, $journal_dev;
{ name => 'osd' }, } else {
{ name => 'pools' }, push @$cmd, $param->{dev};
{ name => 'stop' }, }
{ name => 'start' },
{ name => 'status' }, run_command($cmd);
{ name => 'crush' }, };
{ name => 'config' },
{ name => 'log' },
{ name => 'disks' },
];
return $result; return $rpcenv->fork_worker('cephcreateosd', $devname, $authuser, $worker);
}}); }});
__PACKAGE__->register_method ({ __PACKAGE__->register_method ({
name => 'disks', name => 'destroyosd',
path => 'disks', path => '{osdid}',
method => 'GET', method => 'DELETE',
description => "List local disks.", description => "Destroy OSD",
proxyto => 'node', proxyto => 'node',
protected => 1, protected => 1,
parameters => { parameters => {
additionalProperties => 0, additionalProperties => 0,
properties => { properties => {
node => get_standard_option('pve-node'), node => get_standard_option('pve-node'),
type => { osdid => {
description => "Only list specific types of disks.", description => 'OSD ID',
type => 'string', type => 'integer',
enum => ['unused', 'journal_disks'],
optional => 1,
}, },
}, cleanup => {
}, description => "If set, we remove partition table entries.",
returns => { type => 'boolean',
type => 'array', optional => 1,
items => { default => 0,
type => "object",
properties => {
dev => { type => 'string' },
used => { type => 'string', optional => 1 },
gpt => { type => 'boolean' },
size => { type => 'integer' },
osdid => { type => 'integer' },
vendor => { type => 'string', optional => 1 },
model => { type => 'string', optional => 1 },
serial => { type => 'string', optional => 1 },
}, },
}, },
# links => [ { rel => 'child', href => "{}" } ],
}, },
returns => { type => 'string' },
code => sub { code => sub {
my ($param) = @_; my ($param) = @_;
PVE::CephTools::check_ceph_inited(); my $rpcenv = PVE::RPCEnvironment::get();
my $disks = list_disks(); my $authuser = $rpcenv->get_user();
my $res = []; PVE::CephTools::check_ceph_inited();
foreach my $dev (keys %$disks) {
my $d = $disks->{$dev};
if ($param->{type}) {
if ($param->{type} eq 'journal_disks') {
next if $d->{osdid} >= 0;
next if !$d->{gpt};
} elsif ($param->{type} eq 'unused') {
next if $d->{used};
} else {
die "internal error"; # should not happen
}
}
$d->{dev} = "/dev/$dev"; my $osdid = $param->{osdid};
push @$res, $d;
}
return $res; my $rados = PVE::RADOS->new();
}}); my $osdstat = &$get_osd_status($rados, $osdid);
__PACKAGE__->register_method ({ die "osd is in use (in == 1)\n" if $osdstat->{in};
name => 'config', #&$run_ceph_cmd(['osd', 'out', $osdid]);
path => 'config',
method => 'GET',
description => "Get Ceph configuration.",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
},
},
returns => { type => 'string' },
code => sub {
my ($param) = @_;
PVE::CephTools::check_ceph_inited(); die "osd is still runnung (up == 1)\n" if $osdstat->{up};
my $path = PVE::CephTools::get_config('pve_ceph_cfgpath'); my $osdsection = "osd.$osdid";
return PVE::Tools::file_get_contents($path);
}}); my $worker = sub {
my $upid = shift;
__PACKAGE__->register_method ({ # reopen with longer timeout
name => 'listmon', $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
path => 'mon',
method => 'GET',
description => "Get Ceph monitor list.",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
},
},
returns => {
type => 'array',
items => {
type => "object",
properties => {
name => { type => 'string' },
addr => { type => 'string' },
},
},
links => [ { rel => 'child', href => "{name}" } ],
},
code => sub {
my ($param) = @_;
PVE::CephTools::check_ceph_inited(); print "destroy OSD $osdsection\n";
my $res = []; eval { PVE::CephTools::ceph_service_cmd('stop', $osdsection); };
warn $@ if $@;
my $cfg = PVE::CephTools::parse_ceph_config(); print "Remove $osdsection from the CRUSH map\n";
$rados->mon_command({ prefix => "osd crush remove", name => $osdsection, format => 'plain' });
my $monhash = {}; print "Remove the $osdsection authentication key.\n";
foreach my $section (keys %$cfg) { $rados->mon_command({ prefix => "auth del", entity => $osdsection, format => 'plain' });
my $d = $cfg->{$section};
if ($section =~ m/^mon\.(\S+)$/) { print "Remove OSD $osdsection\n";
my $monid = $1; $rados->mon_command({ prefix => "osd rm", ids => [ $osdsection ], format => 'plain' });
if ($d->{'mon addr'} && $d->{'host'}) {
$monhash->{$monid} = { # try to unmount from standard mount point
addr => $d->{'mon addr'}, my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
host => $d->{'host'},
name => $monid, my $remove_partition = sub {
my ($disklist, $part) = @_;
return if !$part || (! -b $part );
foreach my $real_dev (keys %$disklist) {
my $diskinfo = $disklist->{$real_dev};
next if !$diskinfo->{gpt};
if ($part =~ m|^/dev/${real_dev}(\d+)$|) {
my $partnum = $1;
print "remove partition $part (disk '/dev/${real_dev}', partnum $partnum)\n";
eval { run_command(['/sbin/sgdisk', '-d', $partnum, "/dev/${real_dev}"]); };
warn $@ if $@;
last;
} }
} }
} };
}
eval { my $journal_part;
my $rados = PVE::RADOS->new(); my $data_part;
my $monstat = $rados->mon_command({ prefix => 'mon_status' });
my $mons = $monstat->{monmap}->{mons}; if ($param->{cleanup}) {
foreach my $d (@$mons) { my $jpath = "$mountpoint/journal";
next if !defined($d->{name}); $journal_part = abs_path($jpath);
$monhash->{$d->{name}}->{rank} = $d->{rank};
$monhash->{$d->{name}}->{addr} = $d->{addr}; if (my $fd = IO::File->new("/proc/mounts", "r")) {
if (grep { $_ eq $d->{rank} } @{$monstat->{quorum}}) { while (defined(my $line = <$fd>)) {
$monhash->{$d->{name}}->{quorum} = 1; my ($dev, $path, $fstype) = split(/\s+/, $line);
next if !($dev && $path && $fstype);
next if $dev !~ m|^/dev/|;
if ($path eq $mountpoint) {
$data_part = abs_path($dev);
last;
}
}
close($fd);
} }
} }
print "Unmount OSD $osdsection from $mountpoint\n";
eval { run_command(['umount', $mountpoint]); };
if (my $err = $@) {
warn $err;
} elsif ($param->{cleanup}) {
my $disklist = PVE::CephTools::list_disks();
&$remove_partition($disklist, $journal_part);
&$remove_partition($disklist, $data_part);
}
}; };
warn $@ if $@;
return PVE::RESTHandler::hash_to_array($monhash, 'name'); return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker);
}}); }});
__PACKAGE__->register_method ({ __PACKAGE__->register_method ({
name => 'init', name => 'in',
path => 'init', path => '{osdid}/in',
method => 'POST', method => 'POST',
description => "Create initial ceph default configuration and setup symlinks.", description => "ceph osd in",
proxyto => 'node', proxyto => 'node',
protected => 1, protected => 1,
parameters => { parameters => {
additionalProperties => 0, additionalProperties => 0,
properties => { properties => {
node => get_standard_option('pve-node'), node => get_standard_option('pve-node'),
network => { osdid => {
description => "Use specific network for all ceph related traffic", description => 'OSD ID',
type => 'string', format => 'CIDR',
optional => 1,
maxLength => 128,
},
size => {
description => 'Number of replicas per object',
type => 'integer',
default => 2,
optional => 1,
minimum => 1,
maximum => 3,
},
pg_bits => {
description => "Placement group bits, used to specify the default number of placement groups (Note: 'osd pool default pg num' does not work for deafult pools)",
type => 'integer', type => 'integer',
default => 6,
optional => 1,
minimum => 6,
maximum => 14,
}, },
}, },
}, },
returns => { type => 'null' }, returns => { type => "null" },
code => sub { code => sub {
my ($param) = @_; my ($param) = @_;
PVE::CephTools::check_ceph_installed(); PVE::CephTools::check_ceph_inited();
# simply load old config if it already exists
my $cfg = PVE::CephTools::parse_ceph_config();
if (!$cfg->{global}) {
my $fsid;
my $uuid;
UUID::generate($uuid);
UUID::unparse($uuid, $fsid);
$cfg->{global} = {
'fsid' => $fsid,
'auth supported' => 'cephx',
'auth cluster required' => 'cephx',
'auth service required' => 'cephx',
'auth client required' => 'cephx',
'filestore xattr use omap' => 'true',
'osd journal size' => $pve_osd_default_journal_size,
'osd pool default min size' => 1,
};
# this does not work for default pools
#'osd pool default pg num' => $pg_num,
#'osd pool default pgp num' => $pg_num,
}
$cfg->{global}->{keyring} = '/etc/pve/priv/$cluster.$name.keyring';
$cfg->{osd}->{keyring} = '/var/lib/ceph/osd/ceph-$id/keyring';
$cfg->{global}->{'osd pool default size'} = $param->{size} if $param->{size}; my $osdid = $param->{osdid};
if ($param->{pg_bits}) { my $rados = PVE::RADOS->new();
$cfg->{global}->{'osd pg bits'} = $param->{pg_bits};
$cfg->{global}->{'osd pgp bits'} = $param->{pg_bits};
}
if ($param->{network}) { my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
$cfg->{global}->{'public network'} = $param->{network};
$cfg->{global}->{'cluster network'} = $param->{network};
}
PVE::CephTools::write_ceph_config($cfg); my $osdsection = "osd.$osdid";
PVE::CephTools::setup_pve_symlinks(); $rados->mon_command({ prefix => "osd in", ids => [ $osdsection ], format => 'plain' });
return undef; return undef;
}}); }});
my $find_node_ip = sub {
my ($cidr) = @_;
my $config = PVE::INotify::read_file('interfaces');
my $net = Net::IP->new($cidr) || die Net::IP::Error() . "\n";
foreach my $iface (keys %$config) {
my $d = $config->{$iface};
next if !$d->{address};
my $a = Net::IP->new($d->{address});
next if !$a;
return $d->{address} if $net->overlaps($a);
}
die "unable to find local address within network '$cidr'\n";
};
__PACKAGE__->register_method ({ __PACKAGE__->register_method ({
name => 'createmon', name => 'out',
path => 'mon', path => '{osdid}/out',
method => 'POST', method => 'POST',
description => "Create Ceph Monitor", description => "ceph osd out",
proxyto => 'node', proxyto => 'node',
protected => 1, protected => 1,
parameters => { parameters => {
additionalProperties => 0, additionalProperties => 0,
properties => { properties => {
node => get_standard_option('pve-node'), node => get_standard_option('pve-node'),
osdid => {
description => 'OSD ID',
type => 'integer',
},
}, },
}, },
returns => { type => 'string' }, returns => { type => "null" },
code => sub { code => sub {
my ($param) = @_; my ($param) = @_;
PVE::CephTools::check_ceph_inited(); PVE::CephTools::check_ceph_inited();
PVE::CephTools::setup_pve_symlinks(); my $osdid = $param->{osdid};
my $rpcenv = PVE::RPCEnvironment::get(); my $rados = PVE::RADOS->new();
my $authuser = $rpcenv->get_user(); my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
my $cfg = PVE::CephTools::parse_ceph_config(); my $osdsection = "osd.$osdid";
my $moncount = 0; $rados->mon_command({ prefix => "osd out", ids => [ $osdsection ], format => 'plain' });
my $monaddrhash = {}; return undef;
}});
foreach my $section (keys %$cfg) { package PVE::API2::Ceph;
next if $section eq 'global';
my $d = $cfg->{$section}; use strict;
if ($section =~ m/^mon\./) { use warnings;
$moncount++; use File::Basename;
if ($d->{'mon addr'}) { use File::Path;
$monaddrhash->{$d->{'mon addr'}} = $section; use POSIX qw (LONG_MAX);
} use Cwd qw(abs_path);
} use IO::Dir;
} use UUID;
use Net::IP;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach);
use PVE::Exception qw(raise raise_param_exc);
use PVE::INotify;
use PVE::Cluster qw(cfs_lock_file cfs_read_file cfs_write_file);
use PVE::AccessControl;
use PVE::Storage;
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::JSONSchema qw(get_standard_option);
use JSON;
use PVE::RADOS;
use PVE::CephTools;
use base qw(PVE::RESTHandler);
use Data::Dumper; # fixme: remove
my $pve_osd_default_journal_size = 1024*5;
# Mount the OSD management API (PVE::API2::CephOSD) under the 'osd' sub-path
# of this node-level Ceph API.
__PACKAGE__->register_method ({
    subclass => "PVE::API2::CephOSD",
    path => 'osd',
});
__PACKAGE__->register_method ({
    name => 'index',
    path => '',
    method => 'GET',
    description => "Directory index.",
    permissions => { user => 'all' },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	},
    },
    returns => {
	type => 'array',
	items => {
	    type => "object",
	    properties => {},
	},
	links => [ { rel => 'child', href => "{name}" } ],
    },
    code => sub {
	my ($param) = @_;

	# One directory entry per sub-path handled by this API module;
	# order matches the registered child handlers.
	return [ map { { name => $_ } } qw(
	    init mon osd pools stop start status crush config log disks
	) ];
    }});
# List the node's local block devices together with their Ceph usage state,
# as collected by PVE::CephTools::list_disks().
__PACKAGE__->register_method ({
    name => 'disks',
    path => 'disks',
    method => 'GET',
    description => "List local disks.",
    proxyto => 'node',
    protected => 1,
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    type => {
		description => "Only list specific types of disks.",
		type => 'string',
		enum => ['unused', 'journal_disks'],
		optional => 1,
	    },
	},
    },
    returns => {
	type => 'array',
	items => {
	    type => "object",
	    properties => {
		dev => { type => 'string' },
		used => { type => 'string', optional => 1 },
		gpt => { type => 'boolean' },
		size => { type => 'integer' },
		osdid => { type => 'integer' },
		vendor => { type => 'string', optional => 1 },
		model => { type => 'string', optional => 1 },
		serial => { type => 'string', optional => 1 },
	    },
	},
	# links => [ { rel => 'child', href => "{}" } ],
    },
    code => sub {
	my ($param) = @_;

	PVE::CephTools::check_ceph_inited();

	my $disks = PVE::CephTools::list_disks();

	my $res = [];

	foreach my $dev (keys %$disks) {
	    my $d = $disks->{$dev};
	    # apply the optional type filter
	    if ($param->{type}) {
		if ($param->{type} eq 'journal_disks') {
		    # journal candidates: GPT disks not already used by an OSD
		    next if $d->{osdid} >= 0;
		    next if !$d->{gpt};
		} elsif ($param->{type} eq 'unused') {
		    next if $d->{used};
		} else {
		    die "internal error"; # should not happen
		}
	    }

	    # report the full device path, not just the kernel name
	    $d->{dev} = "/dev/$dev";

	    push @$res, $d;
	}

	return $res;
    }});
__PACKAGE__->register_method ({
    name => 'config',
    path => 'config',
    method => 'GET',
    description => "Get Ceph configuration.",
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	},
    },
    returns => { type => 'string' },
    code => sub {
	my ($param) = @_;

	PVE::CephTools::check_ceph_inited();

	# return the raw ceph.conf contents as a single string
	my $cfgpath = PVE::CephTools::get_config('pve_ceph_cfgpath');
	return PVE::Tools::file_get_contents($cfgpath);
    }});
# List Ceph monitors: static data from ceph.conf, enriched (best effort)
# with runtime rank/quorum information from the cluster.
__PACKAGE__->register_method ({
    name => 'listmon',
    path => 'mon',
    method => 'GET',
    description => "Get Ceph monitor list.",
    proxyto => 'node',
    protected => 1,
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	},
    },
    returns => {
	type => 'array',
	items => {
	    type => "object",
	    properties => {
		name => { type => 'string' },
		addr => { type => 'string' },
	    },
	},
	links => [ { rel => 'child', href => "{name}" } ],
    },
    code => sub {
	my ($param) = @_;

	PVE::CephTools::check_ceph_inited();

	my $cfg = PVE::CephTools::parse_ceph_config();

	# collect monitors declared in ceph.conf ([mon.X] sections)
	my $monhash = {};
	foreach my $section (keys %$cfg) {
	    my $d = $cfg->{$section};
	    if ($section =~ m/^mon\.(\S+)$/) {
		my $monid = $1;
		if ($d->{'mon addr'} && $d->{'host'}) {
		    $monhash->{$monid} = {
			addr => $d->{'mon addr'},
			host => $d->{'host'},
			name => $monid,
		    }
		}
	    }
	}

	# enrich with runtime status (rank, quorum membership); best effort -
	# an unreachable cluster must not break the listing, so errors only warn
	eval {
	    my $rados = PVE::RADOS->new();
	    my $monstat = $rados->mon_command({ prefix => 'mon_status' });
	    my $mons = $monstat->{monmap}->{mons};
	    foreach my $d (@$mons) {
		next if !defined($d->{name});
		$monhash->{$d->{name}}->{rank} = $d->{rank};
		$monhash->{$d->{name}}->{addr} = $d->{addr};
		if (grep { $_ eq $d->{rank} } @{$monstat->{quorum}}) {
		    $monhash->{$d->{name}}->{quorum} = 1;
		}
	    }
	};
	warn $@ if $@;

	return PVE::RESTHandler::hash_to_array($monhash, 'name');
    }});
# Create the initial PVE-flavoured ceph.conf (only if no [global] section
# exists yet) and set up the /etc/pve symlinks.
__PACKAGE__->register_method ({
    name => 'init',
    path => 'init',
    method => 'POST',
    description => "Create initial ceph default configuration and setup symlinks.",
    proxyto => 'node',
    protected => 1,
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    network => {
		description => "Use specific network for all ceph related traffic",
		type => 'string', format => 'CIDR',
		optional => 1,
		maxLength => 128,
	    },
	    size => {
		description => 'Number of replicas per object',
		type => 'integer',
		default => 2,
		optional => 1,
		minimum => 1,
		maximum => 3,
	    },
	    pg_bits => {
		description => "Placement group bits, used to specify the default number of placement groups (Note: 'osd pool default pg num' does not work for default pools)",
		type => 'integer',
		default => 6,
		optional => 1,
		minimum => 6,
		maximum => 14,
	    },
	},
    },
    returns => { type => 'null' },
    code => sub {
	my ($param) = @_;

	PVE::CephTools::check_ceph_installed();

	# simply load old config if it already exists
	my $cfg = PVE::CephTools::parse_ceph_config();

	if (!$cfg->{global}) {
	    # fresh setup: generate a cluster fsid and write sane defaults
	    my $fsid;
	    my $uuid;

	    UUID::generate($uuid);
	    UUID::unparse($uuid, $fsid);

	    $cfg->{global} = {
		'fsid' => $fsid,
		'auth supported' => 'cephx',
		'auth cluster required' => 'cephx',
		'auth service required' => 'cephx',
		'auth client required' => 'cephx',
		'filestore xattr use omap' => 'true',
		'osd journal size' => $pve_osd_default_journal_size,
		'osd pool default min size' => 1,
	    };

	    # this does not work for default pools
	    #'osd pool default pg num' => $pg_num,
	    #'osd pool default pgp num' => $pg_num,
	}

	# keyring locations are always (re)set, even for pre-existing configs
	$cfg->{global}->{keyring} = '/etc/pve/priv/$cluster.$name.keyring';
	$cfg->{osd}->{keyring} = '/var/lib/ceph/osd/ceph-$id/keyring';

	$cfg->{global}->{'osd pool default size'} = $param->{size} if $param->{size};

	if ($param->{pg_bits}) {
	    $cfg->{global}->{'osd pg bits'} = $param->{pg_bits};
	    $cfg->{global}->{'osd pgp bits'} = $param->{pg_bits};
	}

	if ($param->{network}) {
	    # use the given network for both client and replication traffic
	    $cfg->{global}->{'public network'} = $param->{network};
	    $cfg->{global}->{'cluster network'} = $param->{network};
	}

	PVE::CephTools::write_ceph_config($cfg);

	PVE::CephTools::setup_pve_symlinks();

	return undef;
    }});
# Find a local interface address lying within the given CIDR network.
# Reads the node's network interface configuration and returns the first
# configured address contained in the network; dies if none matches.
my $find_node_ip = sub {
    my ($cidr) = @_;

    my $config = PVE::INotify::read_file('interfaces');

    my $net = Net::IP->new($cidr) || die Net::IP::Error() . "\n";

    foreach my $iface (keys %$config) {
	my $d = $config->{$iface};
	next if !$d->{address};
	my $a = Net::IP->new($d->{address});
	next if !$a; # skip addresses Net::IP cannot parse
	# NOTE(review): overlaps() with a plain address tests containment in
	# $net — assumes $d->{address} carries no prefix; verify for entries
	# written in CIDR notation
	return $d->{address} if $net->overlaps($a);
    }

    die "unable to find local address within network '$cidr'\n";
};
__PACKAGE__->register_method ({
name => 'createmon',
path => 'mon',
method => 'POST',
description => "Create Ceph Monitor",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
},
},
returns => { type => 'string' },
code => sub {
my ($param) = @_;
PVE::CephTools::check_ceph_inited();
PVE::CephTools::setup_pve_symlinks();
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
my $cfg = PVE::CephTools::parse_ceph_config();
my $moncount = 0;
my $monaddrhash = {};
foreach my $section (keys %$cfg) {
next if $section eq 'global';
my $d = $cfg->{$section};
if ($section =~ m/^mon\./) {
$moncount++;
if ($d->{'mon addr'}) {
$monaddrhash->{$d->{'mon addr'}} = $section;
}
}
}
my $monid; my $monid;
for (my $i = 0; $i < 7; $i++) { for (my $i = 0; $i < 7; $i++) {
...@@ -603,7 +852,7 @@ __PACKAGE__->register_method ({ ...@@ -603,7 +852,7 @@ __PACKAGE__->register_method ({
mkdir $mondir; mkdir $mondir;
if ($moncount > 0) { if ($moncount > 0) {
my $rados = PVE::RADOS->new(timeout => $long_rados_timeout); my $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
my $mapdata = $rados->mon_command({ prefix => 'mon getmap', format => 'plain' }); my $mapdata = $rados->mon_command({ prefix => 'mon getmap', format => 'plain' });
PVE::Tools::file_set_contents($monmap, $mapdata); PVE::Tools::file_set_contents($monmap, $mapdata);
} else { } else {
...@@ -682,7 +931,7 @@ __PACKAGE__->register_method ({ ...@@ -682,7 +931,7 @@ __PACKAGE__->register_method ({
my $upid = shift; my $upid = shift;
# reopen with longer timeout # reopen with longer timeout
$rados = PVE::RADOS->new(timeout => $long_rados_timeout); $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
$rados->mon_command({ prefix => "mon remove", name => $monid, format => 'plain' }); $rados->mon_command({ prefix => "mon remove", name => $monid, format => 'plain' });
...@@ -861,453 +1110,142 @@ __PACKAGE__->register_method ({ ...@@ -861,453 +1110,142 @@ __PACKAGE__->register_method ({
__PACKAGE__->register_method ({ __PACKAGE__->register_method ({
name => 'createpool', name => 'createpool',
path => 'pools', path => 'pools',
method => 'POST',
description => "Create POOL",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
name => {
description => "The name of the pool. It must be unique.",
type => 'string',
},
size => {
description => 'Number of replicas per object',
type => 'integer',
default => 2,
optional => 1,
minimum => 1,
maximum => 3,
},
min_size => {
description => 'Minimum number of replicas per object',
type => 'integer',
default => 1,
optional => 1,
minimum => 1,
maximum => 3,
},
pg_num => {
description => "Number of placement groups.",
type => 'integer',
default => 64,
optional => 1,
minimum => 8,
maximum => 32768,
},
crush_ruleset => {
description => "The ruleset to use for mapping object placement in the cluster.",
type => 'integer',
minimum => 0,
maximum => 32768,
default => 0,
optional => 1,
},
},
},
returns => { type => 'null' },
code => sub {
my ($param) = @_;
PVE::CephTools::check_ceph_inited();
my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
die "not fully configured - missing '$pve_ckeyring_path'\n"
if ! -f $pve_ckeyring_path;
my $pg_num = $param->{pg_num} || 64;
my $size = $param->{size} || 2;
my $min_size = $param->{min_size} || 1;
my $ruleset = $param->{crush_ruleset} || 0;
my $rados = PVE::RADOS->new();
$rados->mon_command({
prefix => "osd pool create",
pool => $param->{name},
pg_num => int($pg_num),
# this does not work for unknown reason
# properties => ["size=$size", "min_size=$min_size", "crush_ruleset=$ruleset"],
format => 'plain',
});
$rados->mon_command({
prefix => "osd pool set",
pool => $param->{name},
var => 'min_size',
val => $min_size,
format => 'plain',
});
$rados->mon_command({
prefix => "osd pool set",
pool => $param->{name},
var => 'size',
val => $size,
format => 'plain',
});
if (defined($param->{crush_ruleset})) {
$rados->mon_command({
prefix => "osd pool set",
pool => $param->{name},
var => 'crush_ruleset',
val => $param->{crush_ruleset},
format => 'plain',
});
}
return undef;
}});
__PACKAGE__->register_method ({
name => 'destroypool',
path => 'pools/{name}',
method => 'DELETE',
description => "Destroy pool",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
name => {
description => "The name of the pool. It must be unique.",
type => 'string',
},
},
},
returns => { type => 'null' },
code => sub {
my ($param) = @_;
PVE::CephTools::check_ceph_inited();
my $rados = PVE::RADOS->new();
# fixme: '--yes-i-really-really-mean-it'
$rados->mon_command({
prefix => "osd pool delete",
pool => $param->{name},
pool2 => $param->{name},
sure => '--yes-i-really-really-mean-it',
format => 'plain',
});
return undef;
}});
__PACKAGE__->register_method ({
name => 'listosd',
path => 'osd',
method => 'GET',
description => "Get Ceph osd list/tree.",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
},
},
returns => {
type => "object",
},
code => sub {
my ($param) = @_;
PVE::CephTools::check_ceph_inited();
my $rados = PVE::RADOS->new();
my $res = $rados->mon_command({ prefix => 'osd tree' });
die "no tree nodes found\n" if !($res && $res->{nodes});
my $nodes = {};
my $newnodes = {};
foreach my $e (@{$res->{nodes}}) {
$nodes->{$e->{id}} = $e;
my $new = {
id => $e->{id},
name => $e->{name},
type => $e->{type}
};
foreach my $opt (qw(status crush_weight reweight)) {
$new->{$opt} = $e->{$opt} if defined($e->{$opt});
}
$newnodes->{$e->{id}} = $new;
}
foreach my $e (@{$res->{nodes}}) {
my $new = $newnodes->{$e->{id}};
if ($e->{children} && scalar(@{$e->{children}})) {
$new->{children} = [];
$new->{leaf} = 0;
foreach my $cid (@{$e->{children}}) {
$nodes->{$cid}->{parent} = $e->{id};
if ($nodes->{$cid}->{type} eq 'osd' &&
$e->{type} eq 'host') {
$newnodes->{$cid}->{host} = $e->{name};
}
push @{$new->{children}}, $newnodes->{$cid};
}
} else {
$new->{leaf} = ($e->{id} >= 0) ? 1 : 0;
}
}
my $rootnode;
foreach my $e (@{$res->{nodes}}) {
if (!$nodes->{$e->{id}}->{parent}) {
$rootnode = $newnodes->{$e->{id}};
last;
}
}
die "no root node\n" if !$rootnode;
my $data = { root => $rootnode };
return $data;
}});
__PACKAGE__->register_method ({
name => 'createosd',
path => 'osd',
method => 'POST', method => 'POST',
description => "Create OSD", description => "Create POOL",
proxyto => 'node', proxyto => 'node',
protected => 1, protected => 1,
parameters => { parameters => {
additionalProperties => 0, additionalProperties => 0,
properties => { properties => {
node => get_standard_option('pve-node'), node => get_standard_option('pve-node'),
dev => { name => {
description => "Block device name.", description => "The name of the pool. It must be unique.",
type => 'string', type => 'string',
}, },
journal_dev => { size => {
description => "Block device name for journal.", description => 'Number of replicas per object',
type => 'integer',
default => 2,
optional => 1, optional => 1,
type => 'string', minimum => 1,
maximum => 3,
}, },
fstype => { min_size => {
description => "File system type.", description => 'Minimum number of replicas per object',
type => 'string', type => 'integer',
enum => ['xfs', 'ext4', 'btrfs'], default => 1,
default => 'xfs', optional => 1,
minimum => 1,
maximum => 3,
},
pg_num => {
description => "Number of placement groups.",
type => 'integer',
default => 64,
optional => 1,
minimum => 8,
maximum => 32768,
},
crush_ruleset => {
description => "The ruleset to use for mapping object placement in the cluster.",
type => 'integer',
minimum => 0,
maximum => 32768,
default => 0,
optional => 1, optional => 1,
}, },
}, },
}, },
returns => { type => 'string' }, returns => { type => 'null' },
code => sub { code => sub {
my ($param) = @_; my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
PVE::CephTools::check_ceph_inited(); PVE::CephTools::check_ceph_inited();
PVE::CephTools::setup_pve_symlinks(); my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
my $journal_dev;
if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
$journal_dev = PVE::CephTools::verify_blockdev_path($param->{journal_dev});
}
$param->{dev} = PVE::CephTools::verify_blockdev_path($param->{dev});
my $disklist = list_disks();
my $devname = $param->{dev};
$devname =~ s|/dev/||;
my $diskinfo = $disklist->{$devname};
die "unable to get device info for '$devname'\n"
if !$diskinfo;
die "device '$param->{dev}' is in use\n" die "not fully configured - missing '$pve_ckeyring_path'\n"
if $diskinfo->{used}; if ! -f $pve_ckeyring_path;
my $pg_num = $param->{pg_num} || 64;
my $size = $param->{size} || 2;
my $min_size = $param->{min_size} || 1;
my $ruleset = $param->{crush_ruleset} || 0;
my $rados = PVE::RADOS->new(); my $rados = PVE::RADOS->new();
my $monstat = $rados->mon_command({ prefix => 'mon_status' });
die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
my $fsid = $monstat->{monmap}->{fsid};
$fsid = $1 if $fsid =~ m/^([0-9a-f\-]+)$/;
my $ceph_bootstrap_osd_keyring = PVE::CephTools::get_config('ceph_bootstrap_osd_keyring'); $rados->mon_command({
prefix => "osd pool create",
if (! -f $ceph_bootstrap_osd_keyring) { pool => $param->{name},
my $bindata = $rados->mon_command({ prefix => 'auth get client.bootstrap-osd', format => 'plain' }); pg_num => int($pg_num),
PVE::Tools::file_set_contents($ceph_bootstrap_osd_keyring, $bindata); # this does not work for unknown reason
}; # properties => ["size=$size", "min_size=$min_size", "crush_ruleset=$ruleset"],
format => 'plain',
my $worker = sub { });
my $upid = shift;
my $fstype = $param->{fstype} || 'xfs';
print "create OSD on $param->{dev} ($fstype)\n";
my $ccname = PVE::CephTools::get_config('ccname'); $rados->mon_command({
prefix => "osd pool set",
pool => $param->{name},
var => 'min_size',
val => $min_size,
format => 'plain',
});
my $cmd = ['ceph-disk', 'prepare', '--zap-disk', '--fs-type', $fstype, $rados->mon_command({
'--cluster', $ccname, '--cluster-uuid', $fsid ]; prefix => "osd pool set",
pool => $param->{name},
var => 'size',
val => $size,
format => 'plain',
});
if ($journal_dev) { if (defined($param->{crush_ruleset})) {
print "using device '$journal_dev' for journal\n"; $rados->mon_command({
push @$cmd, '--journal-dev', $param->{dev}, $journal_dev; prefix => "osd pool set",
} else { pool => $param->{name},
push @$cmd, $param->{dev}; var => 'crush_ruleset',
} val => $param->{crush_ruleset},
format => 'plain',
run_command($cmd); });
}; }
return $rpcenv->fork_worker('cephcreateosd', $devname, $authuser, $worker); return undef;
}}); }});
__PACKAGE__->register_method ({ __PACKAGE__->register_method ({
name => 'destroyosd', name => 'destroypool',
path => 'osd/{osdid}', path => 'pools/{name}',
method => 'DELETE', method => 'DELETE',
description => "Destroy OSD", description => "Destroy pool",
proxyto => 'node', proxyto => 'node',
protected => 1, protected => 1,
parameters => { parameters => {
additionalProperties => 0, additionalProperties => 0,
properties => { properties => {
node => get_standard_option('pve-node'), node => get_standard_option('pve-node'),
osdid => { name => {
description => 'OSD ID', description => "The name of the pool. It must be unique.",
type => 'integer', type => 'string',
},
cleanup => {
description => "If set, we remove partition table entries.",
type => 'boolean',
optional => 1,
default => 0,
}, },
}, },
}, },
returns => { type => 'string' }, returns => { type => 'null' },
code => sub { code => sub {
my ($param) = @_; my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
PVE::CephTools::check_ceph_inited(); PVE::CephTools::check_ceph_inited();
my $osdid = $param->{osdid};
# fixme: not 100% sure what we should do here
my $rados = PVE::RADOS->new(); my $rados = PVE::RADOS->new();
my $stat = $rados->mon_command({ prefix => 'osd dump' }); # fixme: '--yes-i-really-really-mean-it'
$rados->mon_command({
my $osdlist = $stat->{osds} || []; prefix => "osd pool delete",
pool => $param->{name},
my $osdstat; pool2 => $param->{name},
foreach my $d (@$osdlist) { sure => '--yes-i-really-really-mean-it',
if ($d->{osd} == $osdid) { format => 'plain',
$osdstat = $d; });
last;
}
}
die "no such OSD '$osdid'\n" if !$osdstat;
die "osd is in use (in == 1)\n" if $osdstat->{in};
#&$run_ceph_cmd(['osd', 'out', $osdid]);
die "osd is still runnung (up == 1)\n" if $osdstat->{up};
my $osdsection = "osd.$osdid";
my $worker = sub {
my $upid = shift;
# reopen with longer timeout
$rados = PVE::RADOS->new(timeout => $long_rados_timeout);
print "destroy OSD $osdsection\n";
eval { PVE::CephTools::ceph_service_cmd('stop', $osdsection); };
warn $@ if $@;
print "Remove $osdsection from the CRUSH map\n";
$rados->mon_command({ prefix => "osd crush remove", name => $osdsection, format => 'plain' });
print "Remove the $osdsection authentication key.\n";
$rados->mon_command({ prefix => "auth del", entity => $osdsection, format => 'plain' });
print "Remove OSD $osdsection\n";
$rados->mon_command({ prefix => "osd rm", ids => "$osdid", format => 'plain' });
# try to unmount from standard mount point
my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
my $remove_partition = sub {
my ($disklist, $part) = @_;
return if !$part || (! -b $part );
foreach my $real_dev (keys %$disklist) {
my $diskinfo = $disklist->{$real_dev};
next if !$diskinfo->{gpt};
if ($part =~ m|^/dev/${real_dev}(\d+)$|) {
my $partnum = $1;
print "remove partition $part (disk '/dev/${real_dev}', partnum $partnum)\n";
eval { run_command(['/sbin/sgdisk', '-d', $partnum, "/dev/${real_dev}"]); };
warn $@ if $@;
last;
}
}
};
my $journal_part;
my $data_part;
if ($param->{cleanup}) {
my $jpath = "$mountpoint/journal";
$journal_part = abs_path($jpath);
if (my $fd = IO::File->new("/proc/mounts", "r")) {
while (defined(my $line = <$fd>)) {
my ($dev, $path, $fstype) = split(/\s+/, $line);
next if !($dev && $path && $fstype);
next if $dev !~ m|^/dev/|;
if ($path eq $mountpoint) {
$data_part = abs_path($dev);
last;
}
}
close($fd);
}
}
print "Unmount OSD $osdsection from $mountpoint\n";
eval { run_command(['umount', $mountpoint]); };
if (my $err = $@) {
warn $err;
} elsif ($param->{cleanup}) {
my $disklist = list_disks();
&$remove_partition($disklist, $journal_part);
&$remove_partition($disklist, $data_part);
}
};
return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker); return undef;
}}); }});
......
...@@ -6,8 +6,9 @@ use File::Basename; ...@@ -6,8 +6,9 @@ use File::Basename;
use File::Path; use File::Path;
use POSIX qw (LONG_MAX); use POSIX qw (LONG_MAX);
use Cwd qw(abs_path); use Cwd qw(abs_path);
use IO::Dir;
use PVE::Tools; use PVE::Tools qw(extract_param run_command file_get_contents file_read_firstline dir_glob_regex dir_glob_foreach);
my $ccname = 'ceph'; # ceph cluster name my $ccname = 'ceph'; # ceph cluster name
my $ceph_cfgdir = "/etc/ceph"; my $ceph_cfgdir = "/etc/ceph";
...@@ -28,6 +29,7 @@ my $config_hash = { ...@@ -28,6 +29,7 @@ my $config_hash = {
pve_ckeyring_path => $pve_ckeyring_path, pve_ckeyring_path => $pve_ckeyring_path,
ceph_bootstrap_osd_keyring => $ceph_bootstrap_osd_keyring, ceph_bootstrap_osd_keyring => $ceph_bootstrap_osd_keyring,
ceph_bootstrap_mds_keyring => $ceph_bootstrap_mds_keyring, ceph_bootstrap_mds_keyring => $ceph_bootstrap_mds_keyring,
long_rados_timeout => 60,
}; };
sub get_config { sub get_config {
...@@ -187,4 +189,147 @@ sub ceph_service_cmd { ...@@ -187,4 +189,147 @@ sub ceph_service_cmd {
PVE::Tools::run_command(['service', 'ceph', '-c', $pve_ceph_cfgpath, @_]); PVE::Tools::run_command(['service', 'ceph', '-c', $pve_ceph_cfgpath, @_]);
} }
sub list_disks {
my $disklist = {};
my $fd = IO::File->new("/proc/mounts", "r") ||
die "unable to open /proc/mounts - $!\n";
my $mounted = {};
while (defined(my $line = <$fd>)) {
my ($dev, $path, $fstype) = split(/\s+/, $line);
next if !($dev && $path && $fstype);
next if $dev !~ m|^/dev/|;
my $real_dev = abs_path($dev);
$mounted->{$real_dev} = $path;
}
close($fd);
my $dev_is_mounted = sub {
my ($dev) = @_;
return $mounted->{$dev};
};
my $dir_is_epmty = sub {
my ($dir) = @_;
my $dh = IO::Dir->new ($dir);
return 1 if !$dh;
while (defined(my $tmp = $dh->read)) {
next if $tmp eq '.' || $tmp eq '..';
$dh->close;
return 0;
}
$dh->close;
return 1;
};
my $journal_uuid = '45b0969e-9b03-4f30-b4c6-b4b80ceff106';
my $journalhash = {};
dir_glob_foreach('/dev/disk/by-parttypeuuid', "$journal_uuid\..+", sub {
my ($entry) = @_;
my $real_dev = abs_path("/dev/disk/by-parttypeuuid/$entry");
$journalhash->{$real_dev} = 1;
});
dir_glob_foreach('/sys/block', '.*', sub {
my ($dev) = @_;
return if $dev eq '.';
return if $dev eq '..';
return if $dev =~ m|^ram\d+$|; # skip ram devices
return if $dev =~ m|^loop\d+$|; # skip loop devices
return if $dev =~ m|^md\d+$|; # skip md devices
return if $dev =~ m|^dm-.*$|; # skip dm related things
return if $dev =~ m|^fd\d+$|; # skip Floppy
return if $dev =~ m|^sr\d+$|; # skip CDs
my $devdir = "/sys/block/$dev/device";
return if ! -d $devdir;
my $size = file_read_firstline("/sys/block/$dev/size");
return if !$size;
$size = $size * 512;
my $info = `udevadm info --path /sys/block/$dev --query all`;
return if !$info;
return if $info !~ m/^E: DEVTYPE=disk$/m;
return if $info =~ m/^E: ID_CDROM/m;
my $serial = 'unknown';
if ($info =~ m/^E: ID_SERIAL_SHORT=(\S+)$/m) {
$serial = $1;
}
my $gpt = 0;
if ($info =~ m/^E: ID_PART_TABLE_TYPE=gpt$/m) {
$gpt = 1;
}
# detect SSD (fixme - currently only works for ATA disks)
my $rpm = 7200; # default guess
if ($info =~ m/^E: ID_ATA_ROTATION_RATE_RPM=(\d+)$/m) {
$rpm = $1;
}
my $vendor = file_read_firstline("$devdir/vendor") || 'unknown';
my $model = file_read_firstline("$devdir/model") || 'unknown';
my $used;
$used = 'LVM' if !&$dir_is_epmty("/sys/block/$dev/holders");
$used = 'mounted' if &$dev_is_mounted("/dev/$dev");
$disklist->{$dev} = {
vendor => $vendor,
model => $model,
size => $size,
serial => $serial,
gpt => $gpt,
rmp => $rpm,
};
my $osdid = -1;
my $journal_count = 0;
my $found_partitions;
my $found_lvm;
my $found_mountpoints;
dir_glob_foreach("/sys/block/$dev", "$dev.+", sub {
my ($part) = @_;
$found_partitions = 1;
if (my $mp = &$dev_is_mounted("/dev/$part")) {
$found_mountpoints = 1;
if ($mp =~ m|^/var/lib/ceph/osd/ceph-(\d+)$|) {
$osdid = $1;
}
}
if (!&$dir_is_epmty("/sys/block/$dev/$part/holders")) {
$found_lvm = 1;
}
$journal_count++ if $journalhash->{"/dev/$part"};
});
$used = 'mounted' if $found_mountpoints && !$used;
$used = 'LVM' if $found_lvm && !$used;
$used = 'partitions' if $found_partitions && !$used;
$disklist->{$dev}->{used} = $used if $used;
$disklist->{$dev}->{osdid} = $osdid;
$disklist->{$dev}->{journals} = $journal_count;
});
return $disklist;
}
1; 1;
...@@ -124,8 +124,8 @@ my $cmddef = { ...@@ -124,8 +124,8 @@ my $cmddef = {
}], }],
createpool => [ 'PVE::API2::Ceph', 'createpool', ['name'], { node => $nodename }], createpool => [ 'PVE::API2::Ceph', 'createpool', ['name'], { node => $nodename }],
destroypool => [ 'PVE::API2::Ceph', 'destroypool', ['name'], { node => $nodename } ], destroypool => [ 'PVE::API2::Ceph', 'destroypool', ['name'], { node => $nodename } ],
createosd => [ 'PVE::API2::Ceph', 'createosd', ['dev'], { node => $nodename }, $upid_exit], createosd => [ 'PVE::API2::CephOSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
destroyosd => [ 'PVE::API2::Ceph', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit], destroyosd => [ 'PVE::API2::CephOSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit],
createmon => [ 'PVE::API2::Ceph', 'createmon', [], { node => $nodename }, $upid_exit], createmon => [ 'PVE::API2::Ceph', 'createmon', [], { node => $nodename }, $upid_exit],
destroymon => [ 'PVE::API2::Ceph', 'destroymon', ['monid'], { node => $nodename }, $upid_exit], destroymon => [ 'PVE::API2::Ceph', 'destroymon', ['monid'], { node => $nodename }, $upid_exit],
start => [ 'PVE::API2::Ceph', 'start', ['service'], { node => $nodename }, $upid_exit], start => [ 'PVE::API2::Ceph', 'start', ['service'], { node => $nodename }, $upid_exit],
......
...@@ -292,6 +292,42 @@ Ext.define('PVE.node.CephOsdTree', { ...@@ -292,6 +292,42 @@ Ext.define('PVE.node.CephOsdTree', {
var sm = Ext.create('Ext.selection.TreeModel', {}); var sm = Ext.create('Ext.selection.TreeModel', {});
var set_button_status; // defined later
var reload = function() {
PVE.Utils.API2Request({
url: "/nodes/" + nodename + "/ceph/osd",
waitMsgTarget: me,
method: 'GET',
failure: function(response, opts) {
PVE.Utils.setErrorMask(me, response.htmlStatus);
},
success: function(response, opts) {
sm.deselectAll();
me.setRootNode(response.result.data.root);
me.expandAll();
set_button_status();
}
});
};
var osd_cmd = function(cmd) {
var rec = sm.getSelection()[0];
if (!(rec && (rec.data.id >= 0) && rec.data.host)) {
return;
}
PVE.Utils.API2Request({
url: "/nodes/" + rec.data.host + "/ceph/osd/" +
rec.data.id + '/' + cmd,
waitMsgTarget: me,
method: 'POST',
success: reload,
failure: function(response, opts) {
Ext.Msg.alert(gettext('Error'), response.htmlStatus);
}
});
};
var service_cmd = function(cmd) { var service_cmd = function(cmd) {
var rec = sm.getSelection()[0]; var rec = sm.getSelection()[0];
if (!(rec && rec.data.name && rec.data.host)) { if (!(rec && rec.data.name && rec.data.host)) {
...@@ -302,6 +338,7 @@ Ext.define('PVE.node.CephOsdTree', { ...@@ -302,6 +338,7 @@ Ext.define('PVE.node.CephOsdTree', {
params: { service: rec.data.name }, params: { service: rec.data.name },
waitMsgTarget: me, waitMsgTarget: me,
method: 'POST', method: 'POST',
success: reload,
failure: function(response, opts) { failure: function(response, opts) {
Ext.Msg.alert(gettext('Error'), response.htmlStatus); Ext.Msg.alert(gettext('Error'), response.htmlStatus);
} }
...@@ -320,6 +357,18 @@ Ext.define('PVE.node.CephOsdTree', { ...@@ -320,6 +357,18 @@ Ext.define('PVE.node.CephOsdTree', {
handler: function(){ service_cmd('stop'); } handler: function(){ service_cmd('stop'); }
}); });
var osd_out_btn = new Ext.Button({
text: 'Out',
disabled: true,
handler: function(){ osd_cmd('out'); }
});
var osd_in_btn = new Ext.Button({
text: 'In',
disabled: true,
handler: function(){ osd_cmd('in'); }
});
var remove_btn = new Ext.Button({ var remove_btn = new Ext.Button({
text: gettext('Remove'), text: gettext('Remove'),
disabled: true, disabled: true,
...@@ -334,16 +383,19 @@ Ext.define('PVE.node.CephOsdTree', { ...@@ -334,16 +383,19 @@ Ext.define('PVE.node.CephOsdTree', {
osdid: rec.data.id osdid: rec.data.id
}); });
win.show(); win.show();
me.mon(win, 'close', reload, me);
} }
}); });
var set_button_status = function() { set_button_status = function() {
var rec = sm.getSelection()[0]; var rec = sm.getSelection()[0];
if (!rec) { if (!rec) {
start_btn.setDisabled(true); start_btn.setDisabled(true);
stop_btn.setDisabled(true); stop_btn.setDisabled(true);
remove_btn.setDisabled(true); remove_btn.setDisabled(true);
osd_out_btn.setDisabled(true);
osd_in_btn.setDisabled(true);
return; return;
} }
...@@ -352,37 +404,23 @@ Ext.define('PVE.node.CephOsdTree', { ...@@ -352,37 +404,23 @@ Ext.define('PVE.node.CephOsdTree', {
start_btn.setDisabled(!(isOsd && (rec.data.status !== 'up'))); start_btn.setDisabled(!(isOsd && (rec.data.status !== 'up')));
stop_btn.setDisabled(!(isOsd && (rec.data.status !== 'down'))); stop_btn.setDisabled(!(isOsd && (rec.data.status !== 'down')));
remove_btn.setDisabled(!(isOsd && (rec.data.status === 'down'))); remove_btn.setDisabled(!(isOsd && (rec.data.status === 'down')));
osd_out_btn.setDisabled(!(isOsd && rec.data['in']));
osd_in_btn.setDisabled(!(isOsd && !rec.data['in']));
}; };
sm.on('selectionchange', set_button_status); sm.on('selectionchange', set_button_status);
var reload = function() {
PVE.Utils.API2Request({
url: "/nodes/" + nodename + "/ceph/osd",
waitMsgTarget: me,
method: 'GET',
failure: function(response, opts) {
PVE.Utils.setErrorMask(me, response.htmlStatus);
},
success: function(response, opts) {
sm.deselectAll();
me.setRootNode(response.result.data.root);
me.expandAll();
set_button_status();
}
});
};
var reload_btn = new Ext.Button({ var reload_btn = new Ext.Button({
text: gettext('Reload'), text: gettext('Reload'),
handler: reload handler: reload
}); });
Ext.apply(me, { Ext.apply(me, {
tbar: [ reload_btn, start_btn, stop_btn, remove_btn ], tbar: [ reload_btn, start_btn, stop_btn, osd_out_btn, osd_in_btn, remove_btn ],
rootVisible: false, rootVisible: false,
fields: ['name', 'type', 'status', 'host', fields: ['name', 'type', 'status', 'host', 'in',
{ type: 'integre', name: 'id' }, { type: 'integer', name: 'id' },
{ type: 'number', name: 'reweight' }, { type: 'number', name: 'reweight' },
{ type: 'number', name: 'crush_weight' }], { type: 'number', name: 'crush_weight' }],
stateful: false, stateful: false,
...@@ -416,6 +454,13 @@ Ext.define('PVE.node.CephOsdTree', { ...@@ -416,6 +454,13 @@ Ext.define('PVE.node.CephOsdTree', {
text: 'Status', text: 'Status',
dataIndex: 'status', dataIndex: 'status',
align: 'right', align: 'right',
renderer: function(value, metaData, rec) {
if (!value) {
return value;
}
var data = rec.data;
return value + '/' + (data['in'] ? 'in' : 'out');
},
width: 100 width: 100
}, },
{ {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment