Ceph automatically creates OSDs [closed]

Problem description · 0 votes · 2 answers

I created a cluster with Ceph 16.2.7 and added OSDs while expanding the cluster. I then removed some OSDs from the CRUSH map with the steps below. Note that the --all-available-devices option is set to unmanaged.

    #ceph orch apply osd --all-available-devices --unmanaged=true

    #ceph osd out osd.x
    #ceph osd down osd.x
    #systemctl stop ceph-d813d6b4-6d3c-11ec-a97e-000c2992a0d6@osd.x.service
    #ceph osd crush rm osd.x
    #ceph osd rm osd.x
    #ceph auth rm osd.x
    #cd /var/lib/ceph/d813d6b4-6d3c-11ec-a97e-000c2992a0d6/
    #rm osd.x -rf
    #cd /etc/systemd/system/ceph-d813d6b4-6d3c-11ec-a97e-000c2992a0d6.target.wants/
    #rm ceph-d813d6b4-6d3c-11ec-a97e-000c2992a0d6@osd.x.service
    #lvremove /dev/ceph-*
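A quick way to confirm after steps like these that the daemon and the CRUSH entry are really gone is to list the OSD tree and the orchestrator's OSD daemons (standard Ceph CLI, shown here only as a sketch):

    #ceph osd tree
    #ceph orch ps --daemon-type osd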

But when I remove the LVM volumes belonging to the deleted OSDs, the deleted OSDs are recreated automatically. I don't want that to happen; I want to create OSDs on the disks manually. Can anyone explain this to me?

[root@ceph2-node-01 ~]# ceph orch ls --export --format yaml
service_type: alertmanager
service_name: alertmanager
placement:
  count: 3
  label: mon
---
service_type: crash
service_name: crash
placement:
  host_pattern: '*'
--- !!python/object:ceph.deployment.service_spec.MonitoringSpec
config: null
networks: []
placement: !!python/object:ceph.deployment.service_spec.PlacementSpec
  count: 3
  count_per_host: null
  host_pattern: null
  hosts:
  - !!python/object/new:ceph.deployment.service_spec.HostPlacementSpec
    - ceph2-node-02
    - ''
    - ''
  - !!python/object/new:ceph.deployment.service_spec.HostPlacementSpec
    - ceph2-node-03
    - ''
    - ''
  label: null
port: null
preview_only: false
service_id: null
service_type: grafana
unmanaged: false
---
service_type: mgr
service_name: mgr
placement:
  count: 2
---
service_type: mon
service_name: mon
placement:
  count: 5
--- !!python/object:ceph.deployment.service_spec.MonitoringSpec
config: null
networks: []
placement: !!python/object:ceph.deployment.service_spec.PlacementSpec
  count: null
  count_per_host: null
  host_pattern: '*'
  hosts: []
  label: null
port: null
preview_only: false
service_id: null
service_type: node-exporter
unmanaged: false
---
service_type: osd
service_id: all-available-devices
service_name: osd.all-available-devices
placement:
  host_pattern: '*'
unmanaged: true
spec:
  data_devices:
    all: true
  filter_logic: AND
  objectstore: bluestore
---
service_type: osd
service_id: dashboard-admin-1642344788791
service_name: osd.dashboard-admin-1642344788791
placement:
  host_pattern: '*'
spec:
  data_devices:
    rotational: true
  db_devices:
    rotational: false
  db_slots: 2
  filter_logic: AND
  objectstore: bluestore
--- !!python/object:ceph.deployment.service_spec.MonitoringSpec
config: null
networks: []
placement: !!python/object:ceph.deployment.service_spec.PlacementSpec
  count: 3
  count_per_host: null
  host_pattern: null
  hosts: []
  label: mon
port: null
preview_only: false
service_id: null
service_type: prometheus
unmanaged: false
Tags: ceph, object-storage, cephadm
2 Answers

0 votes

service_type: osd
service_id: example_drvgrp_name
placement:
  hosts:
  - ses-node2
  - ses-node3
encrypted: true
unmanaged: true
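
A spec like this is normally saved to a file and applied with cephadm's apply command; a minimal sketch, assuming a hypothetical filename osd_spec.yml:

    #ceph orch apply -i osd_spec.yml --dry-run
    #ceph orch apply -i osd_spec.yml

The --dry-run form only previews which OSDs the spec would create, which is useful before letting cephadm touch any disks.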


0 votes

Here, two specs (osd.all-available-devices and osd.dashboard-admin-1642344788791) serve the same purpose. osd.dashboard-admin-1642344788791 is still managed by cephadm. Try setting osd.dashboard-admin-1642344788791 to unmanaged as well and check the status. Once that is done, remove OSDs with ceph orch osd rm <OSD_ID> instead of the manual commands (a sketch of this workflow follows the specs below).

---
service_type: osd
service_id: all-available-devices
service_name: osd.all-available-devices
placement:
  host_pattern: '*'
unmanaged: true
spec:
  data_devices:
    all: true
  filter_logic: AND
  objectstore: bluestore
---
service_type: osd
service_id: dashboard-admin-1642344788791
service_name: osd.dashboard-admin-1642344788791
placement:
  host_pattern: '*'
spec:
  data_devices:
    rotational: true
  db_devices:
    rotational: false
  db_slots: 2
  filter_logic: AND
  objectstore: bluestore
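
A minimal sketch of that suggestion, assuming the exported specs are saved as osd_specs.yml and the OSD to remove has id 3 (filename and id are placeholders): export the OSD specs, add unmanaged: true under osd.dashboard-admin-1642344788791, re-apply the file, and then let the orchestrator handle the removal:

    #ceph orch ls osd --export > osd_specs.yml
    #ceph orch apply -i osd_specs.yml
    #ceph orch osd rm 3
    #ceph orch osd rm status

With both OSD specs unmanaged, nothing recreates OSDs automatically, and a new OSD can then be created by hand on a specific disk with ceph orch daemon add osd, for example ceph orch daemon add osd ceph2-node-02:/dev/sdb (the device path is a placeholder).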