Ceph optimization notes: ceph.conf tuning in detail


Here are some Ceph optimization settings I have recorded. Part of the content comes from the web, and part has been adjusted to fit my own requirements.
1. Kernel pid_max
echo 4194303 >/proc/sys/kernel/pid_max
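Writing to /proc only lasts until reboot. A minimal sketch for making it persistent, assuming a sysctl.conf-based distribution:
echo "kernel.pid_max = 4194303" >> /etc/sysctl.conf
sysctl -p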
2. MTU: the switch side must also support jumbo frames; the setting takes effect once the system NIC is configured.
Add MTU=9000 to the NIC configuration file.
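For example, on a RHEL/CentOS-style system (the interface name eth0 and the ifcfg path are assumptions for illustration):
echo "MTU=9000" >> /etc/sysconfig/network-scripts/ifcfg-eth0
ip link set dev eth0 mtu 9000    # apply immediately; verify with: ip link show eth0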
3. read_ahead improves disk read performance by prefetching data into RAM ahead of time.
echo "8192" >/sys/block/sda/queue/read_ahead_kb
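Values under /sys are also reset at reboot. One way to persist the read-ahead setting, sketched as a udev rule (the rule file name is an assumption, and sd[a-z] is assumed to match the OSD data disks):
cat > /etc/udev/rules.d/80-read-ahead.rules <<'EOF'
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/read_ahead_kb}="8192"
EOF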
4. swappiness, which controls how aggressively the system uses swap
echo "vm.swappiness = 0" >> /etc/sysctl.conf ; sysctl -p
5. I/O scheduler: use noop for SSDs and deadline for SATA/SAS. A udev-based way to apply this automatically is sketched after the commands.
echo "deadline" >/sys/block/sd[x]/queue/scheduler
echo "noop" >/sys/block/sd[x]/queue/scheduler
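A sketch that picks the scheduler from each disk's rotational flag via udev (the rule file name is again an assumption):
cat > /etc/udev/rules.d/60-scheduler.rules <<'EOF'
# non-rotational (SSD) -> noop; rotational (SATA/SAS) -> deadline
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="noop"
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="1", ATTR{queue/scheduler}="deadline"
EOF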
6. ceph.conf configuration options
[global]
fsid = xxxxxxxxxxxxxxx                           # cluster ID
mon host = 10.0.1.1,10.0.1.2,10.0.1.3            # monitor IP addresses
auth cluster required = cephx                    # cluster authentication
auth service required = cephx                    # service authentication
auth client required = cephx                     # client authentication
osd pool default size = 3                        # default number of replicas: 3
osd pool default min size = 1                    # a PG in the degraded state can still accept IO; min_size is the minimum number of replicas a PG needs to accept IO
public network = 10.0.1.0/24                     # public network (the monitors' IP segment)
cluster network = 10.0.2.0/24                    # cluster (replication) network
max open files = 131072                          # default 0; if set, Ceph sets the OS-level max open fds itself
mon initial members = node1, node2, node3        # initial monitors (monitors that must be up to form the initial quorum)
##############################################################
[mon]
mon data = /var/lib/ceph/mon/ceph-$id
mon clock drift allowed = 1                             # default 0.05; clock drift allowed between monitors
mon osd min down reporters = 13                         # default 1; minimum number of OSDs reporting an OSD down before the monitor accepts it
mon osd down out interval = 600      # default 300      # seconds Ceph waits after an OSD goes down before marking it out
##############################################################
[osd]
osd data = /var/lib/ceph/osd/ceph-$id
osd journal size = 20000 # default 5120               # OSD journal size (MB)
osd journal = /var/lib/ceph/osd/$cluster-$id/journal  # OSD journal path
osd mkfs type = xfs                                     # filesystem type used when creating OSDs
osd max write size = 512 # default 90             # maximum size of a single OSD write (MB)
osd client message size cap = 2147483648 # default 100MB # maximum client data allowed in memory (bytes)
osd deep scrub stride = 131072 # default 524288   # bytes read per chunk during a deep scrub
osd op threads = 16 # default 2                   # number of threads handling OSD operations
osd disk threads = 4 # default 1                  # OSD disk threads, used for background work such as scrubbing
osd map cache size = 1024 # default 500           # size of the OSD map cache (MB)
osd map cache bl size = 128 # default 50          # size of the in-memory OSD map cache (MB)
osd mount options xfs = "rw,noexec,nodev,noatime,nodiratime,nobarrier" # default rw,noatime,inode64  # mount options for Ceph OSD xfs volumes
osd recovery op priority = 2 # default 10         # recovery op priority, range 1-63; lower values mean lower priority
osd recovery max active = 10 # default 15         # number of active recovery requests per OSD at one time
osd max backfills = 4  # default 10               # maximum number of concurrent backfills per OSD
osd min pg log entries = 30000 # default 3000     # minimum number of PGLog entries kept when trimming the PGLog
osd max pg log entries = 100000 # default 10000   # maximum number of PGLog entries before the PGLog is trimmed
osd mon heartbeat interval = 40 # default 30      # interval (seconds) at which the OSD pings the monitor
ms dispatch throttle bytes = 1048576000 # default 104857600 # throttle on the size of the message dispatch queue
objecter inflight ops = 819200 # default 1024     # client flow control: maximum number of in-flight IO requests; beyond this the application's IO blocks; 0 means unlimited
osd op log threshold = 50 # default 5             # how many op log messages to show at once
osd crush chooseleaf type = 0 # default 1         # bucket type used by chooseleaf in CRUSH rules
filestore xattr use omap = true                         # default false; use the object map for XATTRS; required for EXT4, optional for XFS and btrfs
filestore min sync interval = 10                        # default 0.1; minimum interval (seconds) between syncs from journal to data disk
filestore max sync interval = 15                        # default 5; maximum interval (seconds) between syncs from journal to data disk
filestore queue max ops = 25000                        # default 500; maximum number of operations the filestore queue accepts
filestore queue max bytes = 1048576000      # default 100MB # maximum size of a single operation (bytes)
filestore queue committing max ops = 50000 # default 500    # maximum number of operations the filestore can commit at once
filestore queue committing max bytes = 10485760000 # default 100MB # maximum bytes the filestore can commit at once (bytes)
filestore split multiple = 8 # default 2            # controls how many files a subdirectory may hold before it is split into child directories
filestore merge threshold = 40 # default 10         # minimum number of files in a subdirectory before it is merged back into its parent
filestore fd cache size = 1024 # default 128        # size of the object file descriptor cache
filestore op threads = 32  # default 2              # number of concurrent filestore operation threads
journal max write bytes = 1073714824 # default 1048560    # maximum bytes per journal write (bytes)
journal max write entries = 10000 # default 100     # maximum entries per journal write
journal queue max ops = 50000  # default 50         # maximum number of operations allowed in the journal queue
journal queue max bytes = 10485760000 # default 33554432  # maximum bytes allowed in the journal queue (bytes)
##############################################################
[client]
rbd cache = true # default true                   # enable RBD caching
rbd cache size = 335544320 # default 33554432           # RBD cache size (bytes)
rbd cache max dirty = 134217728 # default 25165824      # maximum dirty bytes allowed in write-back mode (bytes); setting it to 0 means write-through
rbd cache max dirty age = 30 # default 1                # how long (seconds) dirty data may sit in the cache before being flushed
rbd cache writethrough until flush = false # default true # intended for virtio drivers before linux-2.6.32, which never send flush requests and would otherwise risk data loss;
              # when set to true, librbd handles IO in writethrough mode and only switches to writeback after the first flush request is received.
rbd cache max dirty object = 2 # default 0              # maximum number of cached objects; 0 means it is computed from rbd cache size. librbd splits an image into
      # fixed-size chunks (4MB by default), each cached as one Object; since librbd caches at Object granularity, raising this value can improve performance.
rbd cache target dirty = 235544320 # default 16777216   # dirty threshold at which the cache starts writing data back; must not exceed rbd_cache_max_dirty
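Most of these options can be inspected or changed on a running cluster without editing ceph.conf and restarting daemons. A quick sketch; the daemon name osd.0 and the option chosen are only examples:
ceph daemon osd.0 config show | grep osd_max_backfills   # run on the node hosting osd.0
ceph tell osd.* injectargs '--osd_max_backfills 4'       # applies to all OSDs, but does not persist across restarts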
For more articles, see https://zhangdd.com