// ===== ceph-main/src/cls/refcount/cls_refcount_ops.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_REFCOUNT_OPS_H
#define CEPH_CLS_REFCOUNT_OPS_H
#include "include/types.h"
#include "common/hobject.h"
struct cls_refcount_get_op {
std::string tag;
bool implicit_ref;
cls_refcount_get_op() : implicit_ref(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(tag, bl);
encode(implicit_ref, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(tag, bl);
decode(implicit_ref, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_refcount_get_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_refcount_get_op)
struct cls_refcount_put_op {
std::string tag;
bool implicit_ref; // assume a wildcard reference for
// objects with no explicitly set refs
cls_refcount_put_op() : implicit_ref(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(tag, bl);
encode(implicit_ref, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(tag, bl);
decode(implicit_ref, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_refcount_put_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_refcount_put_op)
struct cls_refcount_set_op {
std::list<std::string> refs;
cls_refcount_set_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(refs, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(refs, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_refcount_set_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_refcount_set_op)
struct cls_refcount_read_op {
bool implicit_ref; // assume a wildcard reference for
// objects with no explicitly set refs
cls_refcount_read_op() : implicit_ref(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(implicit_ref, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(implicit_ref, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_refcount_read_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_refcount_read_op)
struct cls_refcount_read_ret {
std::list<std::string> refs;
cls_refcount_read_ret() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(refs, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(refs, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_refcount_read_ret*>& ls);
};
WRITE_CLASS_ENCODER(cls_refcount_read_ret)
struct obj_refcount {
std::map<std::string, bool> refs;
std::set<std::string> retired_refs;
obj_refcount() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(2, 1, bl);
encode(refs, bl);
encode(retired_refs, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(2, bl);
decode(refs, bl);
if (struct_v >= 2) {
decode(retired_refs, bl);
}
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<obj_refcount*>& ls);
};
WRITE_CLASS_ENCODER(obj_refcount)
#endif
// ===== ceph-main/src/cls/rgw/cls_rgw_const.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#define RGW_CLASS "rgw"
/* Special error code returned by cls bucket list operation if it was
* unable to skip past enough not visible entries to return any
* entries in the call. */
constexpr int RGWBIAdvanceAndRetryError = -EFBIG;
/* bucket index */
#define RGW_BUCKET_INIT_INDEX "bucket_init_index"
#define RGW_BUCKET_SET_TAG_TIMEOUT "bucket_set_tag_timeout"
#define RGW_BUCKET_LIST "bucket_list"
#define RGW_BUCKET_CHECK_INDEX "bucket_check_index"
#define RGW_BUCKET_REBUILD_INDEX "bucket_rebuild_index"
#define RGW_BUCKET_UPDATE_STATS "bucket_update_stats"
#define RGW_BUCKET_PREPARE_OP "bucket_prepare_op"
#define RGW_BUCKET_COMPLETE_OP "bucket_complete_op"
#define RGW_BUCKET_LINK_OLH "bucket_link_olh"
#define RGW_BUCKET_UNLINK_INSTANCE "bucket_unlink_instance"
#define RGW_BUCKET_READ_OLH_LOG "bucket_read_olh_log"
#define RGW_BUCKET_TRIM_OLH_LOG "bucket_trim_olh_log"
#define RGW_BUCKET_CLEAR_OLH "bucket_clear_olh"
#define RGW_OBJ_REMOVE "obj_remove"
#define RGW_OBJ_STORE_PG_VER "obj_store_pg_ver"
#define RGW_OBJ_CHECK_ATTRS_PREFIX "obj_check_attrs_prefix"
#define RGW_OBJ_CHECK_MTIME "obj_check_mtime"
#define RGW_BI_GET "bi_get"
#define RGW_BI_PUT "bi_put"
#define RGW_BI_LIST "bi_list"
#define RGW_BI_LOG_LIST "bi_log_list"
#define RGW_BI_LOG_TRIM "bi_log_trim"
#define RGW_DIR_SUGGEST_CHANGES "dir_suggest_changes"
#define RGW_BI_LOG_RESYNC "bi_log_resync"
#define RGW_BI_LOG_STOP "bi_log_stop"
/* usage logging */
#define RGW_USER_USAGE_LOG_ADD "user_usage_log_add"
#define RGW_USER_USAGE_LOG_READ "user_usage_log_read"
#define RGW_USER_USAGE_LOG_TRIM "user_usage_log_trim"
#define RGW_USAGE_LOG_CLEAR "usage_log_clear"
/* garbage collection */
#define RGW_GC_SET_ENTRY "gc_set_entry"
#define RGW_GC_DEFER_ENTRY "gc_defer_entry"
#define RGW_GC_LIST "gc_list"
#define RGW_GC_REMOVE "gc_remove"
/* lifecycle bucket list */
#define RGW_LC_GET_ENTRY "lc_get_entry"
#define RGW_LC_SET_ENTRY "lc_set_entry"
#define RGW_LC_RM_ENTRY "lc_rm_entry"
#define RGW_LC_GET_NEXT_ENTRY "lc_get_next_entry"
#define RGW_LC_PUT_HEAD "lc_put_head"
#define RGW_LC_GET_HEAD "lc_get_head"
#define RGW_LC_LIST_ENTRIES "lc_list_entries"
/* multipart */
#define RGW_MP_UPLOAD_PART_INFO_UPDATE "mp_upload_part_info_update"
/* resharding */
#define RGW_RESHARD_ADD "reshard_add"
#define RGW_RESHARD_LIST "reshard_list"
#define RGW_RESHARD_GET "reshard_get"
#define RGW_RESHARD_REMOVE "reshard_remove"
/* resharding attribute */
#define RGW_SET_BUCKET_RESHARDING "set_bucket_resharding"
#define RGW_CLEAR_BUCKET_RESHARDING "clear_bucket_resharding"
#define RGW_GUARD_BUCKET_RESHARDING "guard_bucket_resharding"
#define RGW_GET_BUCKET_RESHARDING "get_bucket_resharding"
// ===== ceph-main/src/cls/rgw_gc/cls_rgw_gc_ops.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "cls/rgw/cls_rgw_types.h"
struct cls_rgw_gc_queue_init_op {
uint64_t size;
uint64_t num_deferred_entries{0};
cls_rgw_gc_queue_init_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(size, bl);
encode(num_deferred_entries, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(size, bl);
decode(num_deferred_entries, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_rgw_gc_queue_init_op)
struct cls_rgw_gc_queue_remove_entries_op {
uint64_t num_entries;
cls_rgw_gc_queue_remove_entries_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(num_entries, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(num_entries, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_rgw_gc_queue_remove_entries_op)
struct cls_rgw_gc_queue_defer_entry_op {
uint32_t expiration_secs;
cls_rgw_gc_obj_info info;
cls_rgw_gc_queue_defer_entry_op() : expiration_secs(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(expiration_secs, bl);
encode(info, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(expiration_secs, bl);
decode(info, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_rgw_gc_queue_defer_entry_op)
// ===== ceph-main/src/cls/rgw_gc/cls_rgw_gc_types.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/types.h"
#include <unordered_map>
struct cls_rgw_gc_urgent_data
{
std::unordered_map<std::string, ceph::real_time> urgent_data_map;
uint32_t num_urgent_data_entries{0}; // requested by user
uint32_t num_head_urgent_entries{0}; // actual number of entries in queue head
uint32_t num_xattr_urgent_entries{0}; // actual number of entries in xattr in case of spill over
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(urgent_data_map, bl);
encode(num_urgent_data_entries, bl);
encode(num_head_urgent_entries, bl);
encode(num_xattr_urgent_entries, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(urgent_data_map, bl);
decode(num_urgent_data_entries, bl);
decode(num_head_urgent_entries, bl);
decode(num_xattr_urgent_entries, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_rgw_gc_urgent_data)
// ===== ceph-main/src/cls/timeindex/cls_timeindex_client.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_TIMEINDEX_CLIENT_H
#define CEPH_CLS_TIMEINDEX_CLIENT_H
#include "include/rados/librados.hpp"
#include "cls_timeindex_ops.h"
/**
* timeindex objclass
*/
class TimeindexListCtx : public librados::ObjectOperationCompletion {
std::list<cls_timeindex_entry> *entries;
std::string *marker;
bool *truncated;
public:
///* ctor
TimeindexListCtx(
std::list<cls_timeindex_entry> *_entries,
std::string *_marker,
bool *_truncated)
: entries(_entries), marker(_marker), truncated(_truncated) {}
///* dtor
~TimeindexListCtx() {}
void handle_completion(int r, ceph::buffer::list& bl) override {
if (r >= 0) {
cls_timeindex_list_ret ret;
try {
auto iter = bl.cbegin();
decode(ret, iter);
if (entries)
*entries = ret.entries;
if (truncated)
*truncated = ret.truncated;
if (marker)
*marker = ret.marker;
} catch (ceph::buffer::error& err) {
// nothing we can do about it atm
}
}
}
};
void cls_timeindex_add_prepare_entry(
cls_timeindex_entry& entry,
const utime_t& key_timestamp,
const std::string& key_ext,
ceph::buffer::list& bl);
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
const std::list<cls_timeindex_entry>& entry);
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
const cls_timeindex_entry& entry);
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
const utime_t& timestamp,
const std::string& name,
const ceph::buffer::list& bl);
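/*
* Illustrative usage sketch (not part of the upstream header). The payload,
* key suffix and submission of the write op are assumptions; inside rgw the
* op would be submitted via rgw_rados_operate().
*
*   cls_timeindex_entry entry;
*   ceph::buffer::list payload;
*   payload.append("opaque value");
*   cls_timeindex_add_prepare_entry(entry, ceph_clock_now(), "suffix", payload);
*
*   librados::ObjectWriteOperation op;
*   cls_timeindex_add(op, entry);
*   // ...submit op on the time-index object...
*/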
void cls_timeindex_list(
librados::ObjectReadOperation& op,
const utime_t& from,
const utime_t& to,
const std::string& in_marker,
const int max_entries,
std::list<cls_timeindex_entry>& entries,
std::string *out_marker,
bool *truncated);
void cls_timeindex_trim(
librados::ObjectWriteOperation& op,
const utime_t& from_time,
const utime_t& to_time,
const std::string& from_marker = std::string(),
const std::string& to_marker = std::string());
// These overloads, which call io_ctx.operate(), should not be used inside rgw;
// rgw code should use the overloads without io_ctx.operate() calls and then call rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_timeindex_trim(
librados::IoCtx& io_ctx,
const std::string& oid,
const utime_t& from_time,
const utime_t& to_time,
const std::string& from_marker = std::string(),
const std::string& to_marker = std::string());
#endif
#endif
// ===== ceph-main/src/cls/timeindex/cls_timeindex_ops.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_TIMEINDEX_OPS_H
#define CEPH_CLS_TIMEINDEX_OPS_H
#include "cls_timeindex_types.h"
struct cls_timeindex_add_op {
std::list<cls_timeindex_entry> entries;
cls_timeindex_add_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_add_op)
struct cls_timeindex_list_op {
utime_t from_time;
std::string marker; /* if not empty, overrides from_time */
utime_t to_time; /* not inclusive */
int max_entries; /* upper bound on the number of returned entries;
fewer may be returned and the result may still be truncated */
cls_timeindex_list_op() : max_entries(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(from_time, bl);
encode(marker, bl);
encode(to_time, bl);
encode(max_entries, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(from_time, bl);
decode(marker, bl);
decode(to_time, bl);
decode(max_entries, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_list_op)
struct cls_timeindex_list_ret {
std::list<cls_timeindex_entry> entries;
std::string marker;
bool truncated;
cls_timeindex_list_ret() : truncated(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
encode(marker, bl);
encode(truncated, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
decode(marker, bl);
decode(truncated, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_list_ret)
/*
* The operation returns 0 when entries were removed but trimming is not yet
* complete, and -ENODATA once trimming is done, so the caller needs to
* repeat the request until -ENODATA is returned.
*/
struct cls_timeindex_trim_op {
utime_t from_time;
utime_t to_time; /* inclusive */
std::string from_marker;
std::string to_marker;
cls_timeindex_trim_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(from_time, bl);
encode(to_time, bl);
encode(from_marker, bl);
encode(to_marker, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(from_time, bl);
decode(to_time, bl);
decode(from_marker, bl);
decode(to_marker, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_trim_op)
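/*
* Caller-side sketch of the retry behaviour described above (illustrative,
* not part of the upstream header). io_ctx and oid are assumed to exist and
* cls_timeindex_trim() is the synchronous helper from cls_timeindex_client.h:
*
*   int r;
*   do {
*     r = cls_timeindex_trim(io_ctx, oid, from_time, to_time);
*   } while (r == 0);        // 0: some entries were trimmed, more remain
*   if (r != -ENODATA) {
*     // real error; -ENODATA just means trimming reached the end
*   }
*/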
#endif /* CEPH_CLS_TIMEINDEX_OPS_H */
// ===== ceph-main/src/cls/timeindex/cls_timeindex_types.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_TIMEINDEX_TYPES_H
#define CEPH_CLS_TIMEINDEX_TYPES_H
#include "include/encoding.h"
#include "include/types.h"
#include "include/utime.h"
class JSONObj;
struct cls_timeindex_entry {
/* Mandatory timestamp. Will be part of the key. */
utime_t key_ts;
/* Not mandatory. The name_ext field, if not empty, will form second
* part of the key. */
std::string key_ext;
/* Become value of OMAP-based mapping. */
ceph::buffer::list value;
cls_timeindex_entry() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(key_ts, bl);
encode(key_ext, bl);
encode(value, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(key_ts, bl);
decode(key_ext, bl);
decode(value, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_timeindex_entry*>& o);
};
WRITE_CLASS_ENCODER(cls_timeindex_entry)
#endif /* CEPH_CLS_TIMEINDEX_TYPES_H */
// ===== ceph-main/src/cls/user/cls_user_client.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_USER_CLIENT_H
#define CEPH_CLS_USER_CLIENT_H
#include "include/rados/librados_fwd.hpp"
#include "cls_user_ops.h"
#include "common/RefCountedObj.h"
class RGWGetUserHeader_CB : public RefCountedObject {
public:
~RGWGetUserHeader_CB() override {}
virtual void handle_response(int r, cls_user_header& header) = 0;
};
/*
* user objclass
*/
void cls_user_set_buckets(librados::ObjectWriteOperation& op, std::list<cls_user_bucket_entry>& entries, bool add);
void cls_user_complete_stats_sync(librados::ObjectWriteOperation& op);
void cls_user_remove_bucket(librados::ObjectWriteOperation& op, const cls_user_bucket& bucket);
void cls_user_bucket_list(librados::ObjectReadOperation& op,
const std::string& in_marker,
const std::string& end_marker,
int max_entries,
std::list<cls_user_bucket_entry>& entries,
std::string *out_marker,
bool *truncated,
int *pret);
void cls_user_get_header(librados::ObjectReadOperation& op, cls_user_header *header, int *pret);
int cls_user_get_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetUserHeader_CB *ctx);
void cls_user_reset_stats(librados::ObjectWriteOperation& op);
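/*
* Illustrative usage sketch (not part of the upstream header); submitting the
* read op on the user object is omitted:
*
*   librados::ObjectReadOperation op;
*   cls_user_header header;
*   int op_ret = 0;
*   cls_user_get_header(op, &header, &op_ret);
*   // ...submit op...; on success op_ret is 0 and header.stats holds the totals
*/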
#endif
// ===== ceph-main/src/cls/user/cls_user_ops.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_USER_OPS_H
#define CEPH_CLS_USER_OPS_H
#include "cls_user_types.h"
struct cls_user_set_buckets_op {
std::list<cls_user_bucket_entry> entries;
bool add;
ceph::real_time time; /* op time */
cls_user_set_buckets_op() : add(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
encode(add, bl);
encode(time, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
decode(add, bl);
decode(time, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_set_buckets_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_set_buckets_op)
struct cls_user_remove_bucket_op {
cls_user_bucket bucket;
cls_user_remove_bucket_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(bucket, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(bucket, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_remove_bucket_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_remove_bucket_op)
struct cls_user_list_buckets_op {
std::string marker;
std::string end_marker;
int max_entries; /* upper bound on the number of returned entries;
fewer may be returned and the result may still be truncated */
cls_user_list_buckets_op()
: max_entries(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(2, 1, bl);
encode(marker, bl);
encode(max_entries, bl);
encode(end_marker, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(2, bl);
decode(marker, bl);
decode(max_entries, bl);
if (struct_v >= 2) {
decode(end_marker, bl);
}
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_list_buckets_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_list_buckets_op)
struct cls_user_list_buckets_ret {
std::list<cls_user_bucket_entry> entries;
std::string marker;
bool truncated;
cls_user_list_buckets_ret() : truncated(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
encode(marker, bl);
encode(truncated, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
decode(marker, bl);
decode(truncated, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_list_buckets_ret*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_list_buckets_ret)
struct cls_user_get_header_op {
cls_user_get_header_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_get_header_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_get_header_op)
struct cls_user_reset_stats_op {
ceph::real_time time;
cls_user_reset_stats_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(time, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(time, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_reset_stats_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_reset_stats_op);
struct cls_user_reset_stats2_op {
ceph::real_time time;
std::string marker;
cls_user_stats acc_stats;
cls_user_reset_stats2_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(time, bl);
encode(marker, bl);
encode(acc_stats, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(time, bl);
decode(marker, bl);
decode(acc_stats, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_reset_stats2_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_reset_stats2_op);
struct cls_user_reset_stats2_ret {
std::string marker;
cls_user_stats acc_stats; /* 0-initialized */
bool truncated;
cls_user_reset_stats2_ret()
: truncated(false) {}
void update_call(cls_user_reset_stats2_op& call) {
call.marker = marker;
call.acc_stats = acc_stats;
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(marker, bl);
encode(acc_stats, bl);
encode(truncated, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(marker, bl);
decode(acc_stats, bl);
decode(truncated, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(
std::list<cls_user_reset_stats2_ret*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_reset_stats2_ret);
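/*
* Sketch of the intended iteration, inferred from update_call() (illustrative,
* not part of the upstream header): the reset is processed in chunks, and
* while the returned truncated flag is set the caller feeds the returned
* marker and accumulated stats back into the next call.
*
*   cls_user_reset_stats2_op call;
*   cls_user_reset_stats2_ret ret;
*   do {
*     // ...invoke the reset_stats2 method with `call`, decode the reply into `ret`...
*     ret.update_call(call);   // carry marker + acc_stats into the next round
*   } while (ret.truncated);
*/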
struct cls_user_get_header_ret {
cls_user_header header;
cls_user_get_header_ret() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(header, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(header, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_get_header_ret*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_get_header_ret)
struct cls_user_complete_stats_sync_op {
ceph::real_time time;
cls_user_complete_stats_sync_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(time, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(time, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_complete_stats_sync_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_complete_stats_sync_op)
#endif
// ===== ceph-main/src/cls/user/cls_user_types.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_USER_TYPES_H
#define CEPH_CLS_USER_TYPES_H
#include "include/encoding.h"
#include "include/types.h"
#include "include/utime.h"
#include "common/ceph_time.h"
/*
* this needs to be compatible with rgw_bucket, as it replaces it
*/
struct cls_user_bucket {
std::string name;
std::string marker;
std::string bucket_id;
std::string placement_id;
struct {
std::string data_pool;
std::string index_pool;
std::string data_extra_pool;
} explicit_placement;
void encode(ceph::buffer::list& bl) const {
/* since new version of this structure is not backward compatible,
* we have older rgw running against newer osd if we encode it
* in the new way. Only encode newer version if placement_id is
* not empty, otherwise keep handling it as before
*/
if (!placement_id.empty()) {
ENCODE_START(9, 8, bl);
encode(name, bl);
encode(marker, bl);
encode(bucket_id, bl);
encode(placement_id, bl);
ENCODE_FINISH(bl);
} else {
ENCODE_START(7, 3, bl);
encode(name, bl);
encode(explicit_placement.data_pool, bl);
encode(marker, bl);
encode(bucket_id, bl);
encode(explicit_placement.index_pool, bl);
encode(explicit_placement.data_extra_pool, bl);
ENCODE_FINISH(bl);
}
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl);
decode(name, bl);
if (struct_v < 8) {
decode(explicit_placement.data_pool, bl);
}
if (struct_v >= 2) {
decode(marker, bl);
if (struct_v <= 3) {
uint64_t id;
decode(id, bl);
char buf[16];
snprintf(buf, sizeof(buf), "%llu", (long long)id);
bucket_id = buf;
} else {
decode(bucket_id, bl);
}
}
if (struct_v < 8) {
if (struct_v >= 5) {
decode(explicit_placement.index_pool, bl);
} else {
explicit_placement.index_pool = explicit_placement.data_pool;
}
if (struct_v >= 7) {
decode(explicit_placement.data_extra_pool, bl);
}
} else {
decode(placement_id, bl);
if (struct_v == 8 && placement_id.empty()) {
decode(explicit_placement.data_pool, bl);
decode(explicit_placement.index_pool, bl);
decode(explicit_placement.data_extra_pool, bl);
}
}
DECODE_FINISH(bl);
}
bool operator<(const cls_user_bucket& b) const {
return name.compare(b.name) < 0;
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_bucket*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_bucket)
/*
* this structure overrides RGWBucketEnt
*/
struct cls_user_bucket_entry {
cls_user_bucket bucket;
size_t size;
size_t size_rounded;
ceph::real_time creation_time;
uint64_t count;
bool user_stats_sync;
cls_user_bucket_entry() : size(0), size_rounded(0), count(0), user_stats_sync(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(9, 5, bl);
uint64_t s = size;
__u32 mt = ceph::real_clock::to_time_t(creation_time);
std::string empty_str; // originally had the bucket name here, but we encode bucket later
encode(empty_str, bl);
encode(s, bl);
encode(mt, bl);
encode(count, bl);
encode(bucket, bl);
s = size_rounded;
encode(s, bl);
encode(user_stats_sync, bl);
encode(creation_time, bl);
//::encode(placement_rule, bl); removed in v9
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(9, 5, 5, bl);
__u32 mt;
uint64_t s;
std::string empty_str; // backward compatibility
decode(empty_str, bl);
decode(s, bl);
decode(mt, bl);
size = s;
if (struct_v < 7) {
creation_time = ceph::real_clock::from_time_t(mt);
}
if (struct_v >= 2)
decode(count, bl);
if (struct_v >= 3)
decode(bucket, bl);
if (struct_v >= 4)
decode(s, bl);
size_rounded = s;
if (struct_v >= 6)
decode(user_stats_sync, bl);
if (struct_v >= 7)
decode(creation_time, bl);
if (struct_v == 8) { // added in v8, removed in v9
std::string placement_rule;
decode(placement_rule, bl);
}
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_bucket_entry*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_bucket_entry)
struct cls_user_stats {
uint64_t total_entries;
uint64_t total_bytes;
uint64_t total_bytes_rounded;
cls_user_stats()
: total_entries(0),
total_bytes(0),
total_bytes_rounded(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(total_entries, bl);
encode(total_bytes, bl);
encode(total_bytes_rounded, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(total_entries, bl);
decode(total_bytes, bl);
decode(total_bytes_rounded, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_stats*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_stats)
/*
* this needs to be compatible with rgw_bucket, as it replaces it
*/
struct cls_user_header {
cls_user_stats stats;
ceph::real_time last_stats_sync; /* last time a full stats sync completed */
ceph::real_time last_stats_update; /* last time a stats update was done */
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(stats, bl);
encode(last_stats_sync, bl);
encode(last_stats_update, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(stats, bl);
decode(last_stats_sync, bl);
decode(last_stats_update, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_header*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_header)
void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i);
void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i);
void cls_user_gen_test_stats(cls_user_stats *stats);
void cls_user_gen_test_header(cls_user_header *h);
#endif
// ===== ceph-main/src/cls/version/cls_version_client.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_VERSION_CLIENT_H
#define CEPH_CLS_VERSION_CLIENT_H
#include "include/rados/librados_fwd.hpp"
#include "cls_version_ops.h"
/*
* version objclass
*/
void cls_version_set(librados::ObjectWriteOperation& op, obj_version& ver);
/* increase anyway */
void cls_version_inc(librados::ObjectWriteOperation& op);
/* conditional increase, return -EAGAIN if condition fails */
void cls_version_inc(librados::ObjectWriteOperation& op, obj_version& ver, VersionCond cond);
void cls_version_read(librados::ObjectReadOperation& op, obj_version *objv);
// These overloads, which call io_ctx.operate() or io_ctx.exec(), should not be used inside rgw;
// rgw code should use the overloads without io_ctx.operate()/exec() calls and then call rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_version_read(librados::IoCtx& io_ctx, std::string& oid, obj_version *ver);
#endif
void cls_version_check(librados::ObjectOperation& op, obj_version& ver, VersionCond cond);
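/*
* Illustrative usage sketch (not part of the upstream header); submission of
* the ops is omitted:
*
*   librados::ObjectReadOperation rop;
*   obj_version objv;
*   cls_version_read(rop, &objv);   // objv is filled in once rop completes
*
*   librados::ObjectWriteOperation wop;
*   cls_version_inc(wop);           // unconditional increment
*/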
#endif
// ===== ceph-main/src/common/AsyncReserver.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef ASYNC_RESERVER_H
#define ASYNC_RESERVER_H
#include "common/Formatter.h"
#define rdout(x) lgeneric_subdout(cct,reserver,x)
/**
* Manages a configurable number of asynchronous reservations.
*
* Memory usage is linear with the number of items queued and
* linear with respect to the total number of priorities used
* over all time.
*/
template <typename T, typename F>
class AsyncReserver {
CephContext *cct;
F *f;
unsigned max_allowed;
unsigned min_priority;
ceph::mutex lock = ceph::make_mutex("AsyncReserver::lock");
struct Reservation {
T item;
unsigned prio = 0;
Context *grant = 0;
Context *preempt = 0;
Reservation() {}
Reservation(T i, unsigned pr, Context *g, Context *p = 0)
: item(i), prio(pr), grant(g), preempt(p) {}
void dump(ceph::Formatter *f) const {
f->dump_stream("item") << item;
f->dump_unsigned("prio", prio);
f->dump_bool("can_preempt", !!preempt);
}
friend std::ostream& operator<<(std::ostream& out, const Reservation& r) {
return out << r.item << "(prio " << r.prio << " grant " << r.grant
<< " preempt " << r.preempt << ")";
}
};
std::map<unsigned, std::list<Reservation>> queues;
std::map<T, std::pair<unsigned, typename std::list<Reservation>::iterator>> queue_pointers;
std::map<T,Reservation> in_progress;
std::set<std::pair<unsigned,T>> preempt_by_prio; ///< in_progress that can be preempted
void preempt_one() {
ceph_assert(!preempt_by_prio.empty());
auto q = in_progress.find(preempt_by_prio.begin()->second);
ceph_assert(q != in_progress.end());
Reservation victim = q->second;
rdout(10) << __func__ << " preempt " << victim << dendl;
f->queue(victim.preempt);
victim.preempt = nullptr;
in_progress.erase(q);
preempt_by_prio.erase(preempt_by_prio.begin());
}
void do_queues() {
rdout(20) << __func__ << ":\n";
ceph::JSONFormatter jf(true);
jf.open_object_section("queue");
_dump(&jf);
jf.close_section();
jf.flush(*_dout);
*_dout << dendl;
// in case min_priority was adjusted up or max_allowed was adjusted down
while (!preempt_by_prio.empty() &&
(in_progress.size() > max_allowed ||
preempt_by_prio.begin()->first < min_priority)) {
preempt_one();
}
while (!queues.empty()) {
// choose highest priority queue
auto it = queues.end();
--it;
ceph_assert(!it->second.empty());
if (it->first < min_priority) {
break;
}
if (in_progress.size() >= max_allowed &&
!preempt_by_prio.empty() &&
it->first > preempt_by_prio.begin()->first) {
preempt_one();
}
if (in_progress.size() >= max_allowed) {
break; // no room
}
// grant
Reservation p = it->second.front();
rdout(10) << __func__ << " grant " << p << dendl;
queue_pointers.erase(p.item);
it->second.pop_front();
if (it->second.empty()) {
queues.erase(it);
}
f->queue(p.grant);
p.grant = nullptr;
in_progress[p.item] = p;
if (p.preempt) {
preempt_by_prio.insert(std::make_pair(p.prio, p.item));
}
}
}
public:
AsyncReserver(
CephContext *cct,
F *f,
unsigned max_allowed,
unsigned min_priority = 0)
: cct(cct),
f(f),
max_allowed(max_allowed),
min_priority(min_priority) {}
void set_max(unsigned max) {
std::lock_guard l(lock);
max_allowed = max;
do_queues();
}
void set_min_priority(unsigned min) {
std::lock_guard l(lock);
min_priority = min;
do_queues();
}
/**
* Update the priority of a reservation
*
* Note, on_reserved may be called following update_priority. Thus,
* the callback must be safe in that case. Callback will be called
* with no locks held. cancel_reservation must be called to release the
* reservation slot.
*
* Cases
* 1. Item is queued: re-queue it with the new priority
* 2. Item is queued: re-queue it, and preempt an in-progress item if the new priority is higher than that item's
* 3. Item is in progress: just adjust the priority if nothing of higher priority is waiting
* 4. Item is in progress: adjust the priority; if higher-priority items are waiting, they preempt this item
*
*/
void update_priority(T item, unsigned newprio) {
std::lock_guard l(lock);
auto i = queue_pointers.find(item);
if (i != queue_pointers.end()) {
unsigned prio = i->second.first;
if (newprio == prio)
return;
Reservation r = *i->second.second;
rdout(10) << __func__ << " update " << r << " (was queued)" << dendl;
// Like cancel_reservation() without preempting
queues[prio].erase(i->second.second);
if (queues[prio].empty()) {
queues.erase(prio);
}
queue_pointers.erase(i);
// Like request_reservation() to re-queue it but with new priority
ceph_assert(!queue_pointers.count(item) &&
!in_progress.count(item));
r.prio = newprio;
queues[newprio].push_back(r);
queue_pointers.insert(std::make_pair(item,
std::make_pair(newprio,--(queues[newprio]).end())));
} else {
auto p = in_progress.find(item);
if (p != in_progress.end()) {
if (p->second.prio == newprio)
return;
rdout(10) << __func__ << " update " << p->second
<< " (in progress)" << dendl;
// We want to preempt if priority goes down
// and smaller then highest priority waiting
if (p->second.preempt) {
if (newprio < p->second.prio && !queues.empty()) {
// choose highest priority queue
auto it = queues.end();
--it;
ceph_assert(!it->second.empty());
if (it->first > newprio) {
rdout(10) << __func__ << " update " << p->second
<< " lowered priority let do_queues() preempt it" << dendl;
}
}
preempt_by_prio.erase(std::make_pair(p->second.prio, p->second.item));
p->second.prio = newprio;
preempt_by_prio.insert(std::make_pair(p->second.prio, p->second.item));
} else {
p->second.prio = newprio;
}
} else {
rdout(10) << __func__ << " update " << item << " (not found)" << dendl;
}
}
do_queues();
return;
}
void dump(ceph::Formatter *f) {
std::lock_guard l(lock);
_dump(f);
}
void _dump(ceph::Formatter *f) {
f->dump_unsigned("max_allowed", max_allowed);
f->dump_unsigned("min_priority", min_priority);
f->open_array_section("queues");
for (auto& p : queues) {
f->open_object_section("queue");
f->dump_unsigned("priority", p.first);
f->open_array_section("items");
for (auto& q : p.second) {
f->dump_object("item", q);
}
f->close_section();
f->close_section();
}
f->close_section();
f->open_array_section("in_progress");
for (auto& p : in_progress) {
f->dump_object("item", p.second);
}
f->close_section();
}
/**
* Requests a reservation
*
* Note, on_reserved may be called following cancel_reservation. Thus,
* the callback must be safe in that case. Callback will be called
* with no locks held. cancel_reservation must be called to release the
* reservation slot.
*/
void request_reservation(
T item, ///< [in] reservation key
Context *on_reserved, ///< [in] callback to be called on reservation
unsigned prio, ///< [in] priority
Context *on_preempt = 0 ///< [in] callback to be called if we are preempted (optional)
) {
std::lock_guard l(lock);
Reservation r(item, prio, on_reserved, on_preempt);
rdout(10) << __func__ << " queue " << r << dendl;
ceph_assert(!queue_pointers.count(item) &&
!in_progress.count(item));
queues[prio].push_back(r);
queue_pointers.insert(std::make_pair(item,
std::make_pair(prio,--(queues[prio]).end())));
do_queues();
}
/**
* Cancels reservation
*
* Frees the reservation under key for use.
* Note, after cancel_reservation, the reservation_callback may or
* may not still be called.
*/
void cancel_reservation(
T item ///< [in] key for reservation to cancel
) {
std::lock_guard l(lock);
auto i = queue_pointers.find(item);
if (i != queue_pointers.end()) {
unsigned prio = i->second.first;
const Reservation& r = *i->second.second;
rdout(10) << __func__ << " cancel " << r << " (was queued)" << dendl;
delete r.grant;
delete r.preempt;
queues[prio].erase(i->second.second);
if (queues[prio].empty()) {
queues.erase(prio);
}
queue_pointers.erase(i);
} else {
auto p = in_progress.find(item);
if (p != in_progress.end()) {
rdout(10) << __func__ << " cancel " << p->second
<< " (was in progress)" << dendl;
if (p->second.preempt) {
preempt_by_prio.erase(std::make_pair(p->second.prio, p->second.item));
delete p->second.preempt;
}
in_progress.erase(p);
} else {
rdout(10) << __func__ << " cancel " << item << " (not found)" << dendl;
}
}
do_queues();
}
/**
* Has reservations
*
* Return true if there are reservations in progress
*/
bool has_reservation() {
std::lock_guard l(lock);
return !in_progress.empty();
}
static const unsigned MAX_PRIORITY = (unsigned)-1;
};
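/*
* Illustrative usage sketch (not part of the upstream header). F only needs a
* queue(Context*) method that eventually complete()s the queued context; a
* Finisher (common/Finisher.h) and LambdaContext (include/Context.h) are
* assumed here, and the item names and priorities are made up.
*
*   Finisher fin(cct);
*   fin.start();
*   AsyncReserver<std::string, Finisher> reserver(cct, &fin, 2);  // max_allowed = 2
*
*   reserver.request_reservation(
*     "item-a",
*     new LambdaContext([](int) {}),   // called once the reservation is granted
*     10);                             // priority
*   // ...later, to release the slot (always required):
*   reserver.cancel_reservation("item-a");
*/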
#undef rdout
#endif
// ===== ceph-main/src/common/CDC.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <vector>
#include <string>
#include "include/types.h"
#include "include/buffer.h"
class CDC {
public:
virtual ~CDC() = default;
/// calculate chunk boundaries as vector of (offset, length) pairs
virtual void calc_chunks(
const bufferlist& inputdata,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const = 0;
/// set target chunk size as a power of 2, and number of bits for hard min/max
virtual void set_target_bits(int bits, int windowbits = 2) = 0;
static std::unique_ptr<CDC> create(
const std::string& type,
int bits,
int windowbits = 0);
};
void generate_buffer(int size, bufferlist *outbl, int seed = 0);
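/*
* Illustrative usage sketch (not part of the upstream header); "fastcdc" is
* assumed to be one of the registered implementation names:
*
*   bufferlist bl;
*   generate_buffer(4 * 1024 * 1024, &bl);   // 4 MiB of test data
*   auto cdc = CDC::create("fastcdc", 15);   // target chunk size 1 << 15 = 32 KiB
*   std::vector<std::pair<uint64_t, uint64_t>> chunks;
*   cdc->calc_chunks(bl, &chunks);           // (offset, length) pairs
*/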
// ===== ceph-main/src/common/Cond.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COND_H
#define CEPH_COND_H
#include "common/Clock.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
/**
* context to signal a cond
*
* Generic context to signal a cond and store the return value. We
* assume the caller is holding the appropriate lock.
*/
class C_Cond : public Context {
ceph::condition_variable& cond; ///< Cond to signal
bool *done; ///< true if finish() has been called
int *rval; ///< return value
public:
C_Cond(ceph::condition_variable &c, bool *d, int *r) : cond(c), done(d), rval(r) {
*done = false;
}
void finish(int r) override {
*done = true;
*rval = r;
cond.notify_all();
}
};
/**
* context to signal a cond, protected by a lock
*
* Generic context to signal a cond under a specific lock. We take the
* lock in the finish() callback, so the finish() caller must not
* already hold it.
*/
class C_SafeCond : public Context {
ceph::mutex& lock; ///< Mutex to take
ceph::condition_variable& cond; ///< Cond to signal
bool *done; ///< true after finish() has been called
int *rval; ///< return value (optional)
public:
C_SafeCond(ceph::mutex& l, ceph::condition_variable& c, bool *d, int *r=0)
: lock(l), cond(c), done(d), rval(r) {
*done = false;
}
void finish(int r) override {
std::lock_guard l{lock};
if (rval)
*rval = r;
*done = true;
cond.notify_all();
}
};
/**
* Context providing a simple wait() mechanism to wait for completion
*
* The context will not be deleted as part of complete and must live
* until wait() returns.
*/
class C_SaferCond : public Context {
ceph::mutex lock; ///< Mutex to take
ceph::condition_variable cond; ///< Cond to signal
bool done = false; ///< true after finish() has been called
int rval = 0; ///< return value
public:
C_SaferCond() :
C_SaferCond("C_SaferCond")
{}
explicit C_SaferCond(const std::string &name)
: lock(ceph::make_mutex(name)) {}
void finish(int r) override { complete(r); }
/// We overload complete in order to not delete the context
void complete(int r) override {
std::lock_guard l(lock);
done = true;
rval = r;
cond.notify_all();
}
/// Returns rval once the Context is called
int wait() {
std::unique_lock l{lock};
cond.wait(l, [this] { return done;});
return rval;
}
/// Wait until the \c secs expires or \c complete() is called
int wait_for(double secs) {
return wait_for(ceph::make_timespan(secs));
}
int wait_for(ceph::timespan secs) {
std::unique_lock l{lock};
if (done) {
return rval;
}
if (cond.wait_for(l, secs, [this] { return done; })) {
return rval;
} else {
return ETIMEDOUT;
}
}
};
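/*
* Illustrative usage sketch (not part of the upstream header);
* start_async_flush() stands in for any call that takes a Context* and
* complete()s it when the work finishes:
*
*   C_SaferCond cond;
*   start_async_flush(&cond);   // hypothetical asynchronous call
*   int r = cond.wait();        // blocks until complete(r) is called
*/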
#endif
// ===== ceph-main/src/common/ConfUtils.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONFUTILS_H
#define CEPH_CONFUTILS_H
#include <deque>
#include <map>
#include <set>
#include <string>
#include <string_view>
#include <vector>
#include "include/buffer_fwd.h"
/*
* Ceph configuration file support.
*
* This class loads an INI-style configuration from a file or bufferlist, and
* holds it in memory. In general, an INI configuration file is composed of
* sections, which contain key/value pairs. You can put comments on the end of
* lines by using either a hash mark (#) or the semicolon (;).
*
* You can get information out of ConfFile by calling read() or by examining
* individual sections.
*
* This class could be extended to support modifying configuration files and
* writing them back out without too much difficulty. Currently, this is not
* implemented, and the file is read-only.
*/
struct conf_line_t {
conf_line_t() = default;
conf_line_t(const std::string& key, const std::string& val);
bool operator<(const conf_line_t& rhs) const;
std::string key;
std::string val;
};
std::ostream &operator<<(std::ostream& oss, const conf_line_t& line);
class conf_section_t : public std::set<conf_line_t> {
public:
conf_section_t() = default;
conf_section_t(const std::string& heading,
const std::vector<conf_line_t>& lines);
std::string heading;
friend std::ostream& operator<<(std::ostream& os, const conf_section_t&);
};
class ConfFile : public std::map<std::string, conf_section_t, std::less<>> {
using base_type = std::map<std::string, conf_section_t, std::less<>>;
public:
ConfFile()
: ConfFile{std::vector<conf_section_t>{}}
{}
ConfFile(const conf_line_t& line)
: ConfFile{{conf_section_t{"global", {line}}}}
{}
ConfFile(const std::vector<conf_section_t>& sections);
int parse_file(const std::string &fname, std::ostream *warnings);
int parse_bufferlist(ceph::bufferlist *bl, std::ostream *warnings);
bool parse_buffer(std::string_view buf, std::ostream* warning);
int read(std::string_view section, std::string_view key,
std::string &val) const;
static std::string normalize_key_name(std::string_view key);
// print warnings to os if any old-style section name is found
//
// consider a section name as old-style name if it starts with any of the
// given prefixes, but does not follow with a "."
void check_old_style_section_names(const std::vector<std::string>& prefixes,
std::ostream& os);
};
std::ostream &operator<<(std::ostream& oss, const ConfFile& cf);
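/*
* Illustrative usage sketch (not part of the upstream header); the path and
* key below are examples only:
*
*   ConfFile cf;
*   std::ostringstream warnings;
*   if (cf.parse_file("/etc/ceph/ceph.conf", &warnings) == 0) {
*     std::string fsid;
*     if (cf.read("global", "fsid", fsid) == 0) {
*       // fsid now holds the value from the [global] section
*     }
*   }
*/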
#endif
// ===== ceph-main/src/common/Continuation.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/Context.h"
/**
* The Continuation interface is designed to help easily create multi-step
* operations that share data without having to pass it around or create
* custom Context classes for each step. To write a Continuation:
* 1) create a child class with a function for each stage.
* 2) Put all your shared data members into the class.
* 3) In the constructor, register each function stage with set_callback().
* 4) Whenever you need to provide a Context callback that activates the next
* stage, call get_callback(stage_number). If you need to proceed to another
* stage immediately, call immediate(stage, retcode) and return its result.
*
* To use a class:
* 1) Construct the child class on the heap.
* 2) Call begin().
* 3) The destructor will be called once one of your functions returns true to
* indicate it is done.
*
* Please note that while you can skip stages and get multiple Callback
* objects at once, you *cannot* have any stage report that the Continuation
* is completed while any other stage Callbacks are outstanding. It's best to
* be serial unless you want to maintain your own metadata about which stages
* are still pending.
*
* In fact, there are only two situations in which a stage should return
* true while others are running:
* 1) A Callback was issued and completed in the same thread,
* 2) you called immediate(stage) and it is returning true.
*/
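/*
* Illustrative sketch of a two-stage child class (not part of the upstream
* header); start_async_read() and the stage names are made up:
*
*   class ReadThenWrite : public Continuation {
*     enum { STAGE_READ, STAGE_WRITE };
*     bool stage_read(int r) {
*       start_async_read(get_callback(STAGE_WRITE));  // hypothetical async call
*       return false;                                 // not done yet
*     }
*     bool stage_write(int r) {
*       set_rval(r);
*       return true;                                  // done; on_finish will fire
*     }
*   public:
*     explicit ReadThenWrite(Context *on_finish) : Continuation(on_finish) {
*       set_callback(STAGE_READ, static_cast<stagePtr>(&ReadThenWrite::stage_read));
*       set_callback(STAGE_WRITE, static_cast<stagePtr>(&ReadThenWrite::stage_write));
*     }
*   };
*   // usage: (new ReadThenWrite(on_finish))->begin();  // must be heap-allocated
*/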
class Continuation {
std::set<int> stages_in_flight;
std::set<int> stages_processing;
int rval;
Context *on_finish;
bool reported_done;
class Callback : public Context {
Continuation *continuation;
int stage_to_activate;
public:
Callback(Continuation *c, int stage) :
continuation(c),
stage_to_activate(stage) {}
void finish(int r) override {
continuation->continue_function(r, stage_to_activate);
}
};
protected:
typedef bool (Continuation::*stagePtr)(int r);
/**
* Continue immediately to the given stage. It will be executed
* immediately, in the given thread.
* @pre You are in a callback function.
* @param stage The stage to execute
* @param r The return code that will be provided to the next stage
*/
bool immediate(int stage, int r) {
ceph_assert(!stages_in_flight.count(stage));
ceph_assert(!stages_processing.count(stage));
stages_in_flight.insert(stage);
stages_processing.insert(stage);
return _continue_function(r, stage);
}
/**
* Obtain a Context * that when complete()ed calls back into the given stage.
* @pre You are in a callback function.
* @param stage The stage this Context should activate
*/
Context *get_callback(int stage) {
stages_in_flight.insert(stage);
return new Callback(this, stage);
}
/**
* Set the return code that is passed to the finally-activated Context.
* @param new_rval The return code to use.
*/
void set_rval(int new_rval) { rval = new_rval; }
int get_rval() { return rval; }
/**
* Register member functions as associated with a given stage. Start
* your stage IDs at 0 and make that one the setup phase.
* @pre There are no other functions associated with the stage.
* @param stage The stage to associate this function with
* @param func The function to use
*/
void set_callback(int stage, stagePtr func) {
ceph_assert(callbacks.find(stage) == callbacks.end());
callbacks[stage] = func;
}
/**
* Called when the Continuation is done, as determined by a stage returning
* true and us having finished all the currently-processing ones.
*/
virtual void _done() {
on_finish->complete(rval);
on_finish = NULL;
return;
}
private:
std::map<int, Continuation::stagePtr> callbacks;
bool _continue_function(int r, int n) {
std::set<int>::iterator in_flight_iter = stages_in_flight.find(n);
ceph_assert(in_flight_iter != stages_in_flight.end());
ceph_assert(callbacks.count(n));
stagePtr p = callbacks[n];
[[maybe_unused]] auto [processing_iter, inserted] =
stages_processing.insert(n);
bool done = (this->*p)(r);
if (done)
reported_done = true;
stages_processing.erase(processing_iter);
stages_in_flight.erase(in_flight_iter);
return done;
}
void continue_function(int r, int stage) {
bool done = _continue_function(r, stage);
assert (!done ||
stages_in_flight.size() == stages_processing.size());
if ((done || reported_done) && stages_processing.empty()) {
_done();
delete this;
}
}
public:
/**
* Construct a new Continuation object. Call this from your child class,
* obviously.
*
* @Param c The Context which should be complete()ed when this Continuation
* is done.
*/
Continuation(Context *c) :
rval(0), on_finish(c), reported_done(false) {}
/**
* Clean up.
*/
virtual ~Continuation() { ceph_assert(on_finish == NULL); }
/**
* Begin running the Continuation.
*/
void begin() { stages_in_flight.insert(0); continue_function(0, 0); }
};
// ===== ceph-main/src/common/Cycles.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Copyright (c) 2011-2014 Stanford University
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CEPH_CYCLES_H
#define CEPH_CYCLES_H
#include <cstdint>
/**
* This class provides static methods that read the fine-grain CPU
* cycle counter and translate between cycle-level times and absolute
* times.
*/
class Cycles {
public:
static void init();
/**
* Return the current value of the fine-grain CPU cycle counter
* (accessed via the RDTSC instruction).
*/
static __inline __attribute__((always_inline)) uint64_t rdtsc() {
#if defined(__i386__)
int64_t ret;
__asm__ volatile ("rdtsc" : "=A" (ret) );
return ret;
#elif defined(__x86_64__) || defined(__amd64__)
uint32_t lo, hi;
__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
return (((uint64_t)hi << 32) | lo);
#elif defined(__aarch64__)
//
// arch/arm64/include/asm/arch_timer.h
//
// static inline u64 arch_counter_get_cntvct(void)
// {
// u64 cval;
//
// isb();
// asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
//
// return cval;
// }
//
// https://github.com/cloudius-systems/osv/blob/master/arch/aarch64/arm-clock.cc
uint64_t cntvct;
asm volatile ("isb; mrs %0, cntvct_el0; isb; " : "=r" (cntvct) :: "memory");
return cntvct;
#elif defined(__powerpc__) || defined (__powerpc64__)
// Based on:
// https://github.com/randombit/botan/blob/net.randombit.botan/src/lib/entropy/hres_timer/hres_timer.cpp
uint32_t lo = 0, hi = 0;
asm volatile("mftbu %0; mftb %1" : "=r" (hi), "=r" (lo));
return (((uint64_t)hi << 32) | lo);
#elif defined(__s390__)
uint64_t tsc;
asm volatile("stck %0" : "=Q" (tsc) : : "cc");
return tsc;
#else
#warning No high-precision counter available for your OS/arch
return 0;
#endif
}
static double per_second();
static double to_seconds(uint64_t cycles, double cycles_per_sec = 0);
static uint64_t from_seconds(double seconds, double cycles_per_sec = 0);
static uint64_t to_microseconds(uint64_t cycles, double cycles_per_sec = 0);
static uint64_t to_nanoseconds(uint64_t cycles, double cycles_per_sec = 0);
static uint64_t from_nanoseconds(uint64_t ns, double cycles_per_sec = 0);
static void sleep(uint64_t us);
private:
Cycles();
/// Conversion factor between cycles and the seconds; computed by
/// Cycles::init.
static double cycles_per_sec;
/**
* Returns the conversion factor between cycles in seconds, using
* a mock value for testing when appropriate.
*/
static __inline __attribute__((always_inline)) double get_cycles_per_sec() {
return cycles_per_sec;
}
};
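/*
* Illustrative usage sketch (not part of the upstream header); do_work() is a
* stand-in for whatever is being timed:
*
*   Cycles::init();                            // calibrate cycles_per_sec once
*   uint64_t start = Cycles::rdtsc();
*   do_work();
*   double secs = Cycles::to_seconds(Cycles::rdtsc() - start);
*/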
#endif // CEPH_CYCLES_H
// ===== ceph-main/src/common/DecayCounter.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DECAYCOUNTER_H
#define CEPH_DECAYCOUNTER_H
#include "include/buffer.h"
#include "common/Formatter.h"
#include "common/StackStringStream.h"
#include "common/ceph_time.h"
#include <cmath>
#include <list>
#include <sstream>
/**
*
* TODO: normalize value based on some function of half_life,
* so that it can be interpreted as an approximation of a
* moving average of N seconds. currently, changing half-life
* skews the scale of the value, even at steady state.
*
*/
class DecayRate {
public:
friend class DecayCounter;
DecayRate() {}
// cppcheck-suppress noExplicitConstructor
DecayRate(double hl) { set_halflife(hl); }
DecayRate(const DecayRate &dr) : k(dr.k) {}
void set_halflife(double hl) {
k = log(.5) / hl;
}
double get_halflife() const {
return log(.5) / k;
}
private:
double k = 0; // k = ln(.5)/half_life
};
class DecayCounter {
public:
using time = ceph::coarse_mono_time;
using clock = ceph::coarse_mono_clock;
DecayCounter() : DecayCounter(DecayRate()) {}
explicit DecayCounter(const DecayRate &rate) : last_decay(clock::now()), rate(rate) {}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<DecayCounter*>& ls);
/**
* reading
*/
double get() const {
decay();
return val;
}
double get_last() const {
return val;
}
time get_last_decay() const {
return last_decay;
}
/**
* adjusting
*/
double hit(double v = 1.0) {
decay(v);
return val;
}
void adjust(double v = 1.0) {
decay(v);
}
void scale(double f) {
val *= f;
}
/**
* decay etc.
*/
void reset() {
last_decay = clock::now();
val = 0;
}
protected:
void decay(double delta) const;
void decay() const {decay(0.0);}
private:
mutable double val = 0.0; // value
mutable time last_decay = clock::zero(); // time of last decay
DecayRate rate;
};
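/*
* Illustrative usage sketch (not part of the upstream header):
*
*   DecayCounter hits(DecayRate(10.0));   // 10 second half-life
*   hits.hit();                           // record one event
*   double v = hits.get();                // decayed value as of now
*/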
inline void encode(const DecayCounter &c, ceph::buffer::list &bl) {
c.encode(bl);
}
inline void decode(DecayCounter &c, ceph::buffer::list::const_iterator &p) {
c.decode(p);
}
inline std::ostream& operator<<(std::ostream& out, const DecayCounter& d) {
CachedStackStringStream css;
css->precision(2);
double val = d.get();
*css << "[C " << std::scientific << val << "]";
return out << css->strv();
}
#endif
// ===== ceph-main/src/common/EventTrace.h =====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Intel Corporation.
* All rights reserved.
*
* Author: Anjaneya Chagam <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef _EventTrace_h_
#define _EventTrace_h_
#include "msg/Message.h"
#if defined(WITH_EVENTTRACE)
#define OID_EVENT_TRACE(oid, event) \
EventTrace::trace_oid_event(oid, event, "", __FILE__, __func__, __LINE__)
#define OID_EVENT_TRACE_WITH_MSG(msg, event, incl_oid) \
EventTrace::trace_oid_event(msg, event, __FILE__, __func__, __LINE__, incl_oid)
#define OID_ELAPSED(oid, elapsed, event) \
EventTrace::trace_oid_elapsed(oid, event, "", elapsed, __FILE__, __func__, __LINE__)
#define OID_ELAPSED_WITH_MSG(m, elapsed, event, incl_oid) \
EventTrace::trace_oid_elapsed(m, event, elapsed, __FILE__, __func__, __LINE__, incl_oid)
#define FUNCTRACE(cct) EventTrace _t1(cct, __FILE__, __func__, __LINE__)
#define OID_ELAPSED_FUNC_EVENT(event) _t1.log_event_latency(event)
#else
#define OID_EVENT_TRACE(oid, event)
#define OID_EVENT_TRACE_WITH_MSG(msg, event, incl_oid)
#define OID_ELAPSED(oid, elapsed, event)
#define OID_ELAPSED_WITH_MSG(m, elapsed, event, incl_oid)
#define FUNCTRACE(cct)
#define OID_ELAPSED_FUNC_EVENT(event)
#endif
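// Typical (sketched) use of the macros above inside a traced function; the
// event name is a placeholder, and the calls compile to no-ops unless
// WITH_EVENTTRACE is defined:
//
//   void do_op(CephContext *cct) {
//     FUNCTRACE(cct);                         // declares a local EventTrace _t1
//     // ... stage 1 of the operation ...
//     OID_ELAPSED_FUNC_EVENT("stage1_done");  // logs latency since last event
//   }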
#define LOG_LEVEL 30
class EventTrace {
private:
CephContext *ctx;
std::string file;
std::string func;
int line;
utime_t last_ts;
static bool tpinit;
static void init_tp(CephContext *_ctx);
static void set_message_attrs(const Message *m, std::string& oid, std::string& context, bool incl_oid);
public:
EventTrace(CephContext *_ctx, const char *_file, const char *_func, int line);
~EventTrace();
void log_event_latency(const char *tag);
static void trace_oid_event(const char *oid, const char *event, const char *context,
const char *file, const char *func, int line);
static void trace_oid_event(const Message *m, const char *event, const char *file,
const char *func, int line, bool incl_oid);
static void trace_oid_elapsed(const char *oid, const char *event, const char *context,
double elapsed, const char *file, const char *func, int line);
static void trace_oid_elapsed(const Message *m, const char *event, double elapsed,
const char *file, const char *func, int line, bool incl_oid);
};
#endif
| 2,594 | 31.4375 | 105 | h |
null | ceph-main/src/common/FastCDC.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "CDC.h"
// Based on this paper:
// https://www.usenix.org/system/files/conference/atc16/atc16-paper-xia.pdf
//
// Changes:
// - window size fixed at 64 bytes (to match our word size)
// - use XOR instead of +
// - match mask instead of 0
// - use target mask when close to target size (instead of
// small/large mask). The idea here is to try to use a consistent (target)
// mask for most cut points if we can, and only resort to small/large mask
// when we are (very) small or (very) large.
// Note about the target_bits: The goal is an average chunk size of 1
// << target_bits. However, in reality the average is ~1.25x that
// because of the hard minimum chunk size.
class FastCDC : public CDC {
private:
int target_bits; ///< target chunk size bits (1 << target_bits)
int min_bits; ///< hard minimum chunk size bits (1 << min_bits)
int max_bits; ///< hard maximum chunk size bits (1 << max_bits)
uint64_t target_mask; ///< maskA in the paper (target_bits set)
uint64_t small_mask; ///< maskS in the paper (more bits set)
uint64_t large_mask; ///< maskL in the paper (fewer bits set)
/// lookup table with pseudorandom values for each byte
uint64_t table[256];
/// window size in bytes
const size_t window = sizeof(uint64_t)*8; // bits in uint64_t
void _setup(int target, int window_bits);
public:
FastCDC(int target = 18, int window_bits = 0) {
_setup(target, window_bits);
};
void set_target_bits(int target, int window_bits) override {
_setup(target, window_bits);
}
void calc_chunks(
const bufferlist& bl,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const override;
};
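// Usage sketch: chunking a bufferlist with a ~256 KiB (1 << 18) target chunk
// size. The chunks vector receives (offset, length) pairs covering the input;
// bl stands for a previously filled bufferlist.
//
//   FastCDC cdc(18);
//   std::vector<std::pair<uint64_t, uint64_t>> chunks;
//   cdc.calc_chunks(bl, &chunks);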
| 1,803 | 31.8 | 79 | h |
null | ceph-main/src/common/Finisher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_FINISHER_H
#define CEPH_FINISHER_H
#include "include/Context.h"
#include "include/common_fwd.h"
#include "common/Thread.h"
#include "common/ceph_mutex.h"
#include "common/perf_counters.h"
#include "common/Cond.h"
/// Finisher queue length performance counter ID.
enum {
l_finisher_first = 997082,
l_finisher_queue_len,
l_finisher_complete_lat,
l_finisher_last
};
/** @brief Asynchronous cleanup class.
* Finisher asynchronously completes Contexts, which are simple classes
* representing callbacks, in a dedicated worker thread. Enqueuing
* contexts to complete is thread-safe.
*/
class Finisher {
CephContext *cct;
ceph::mutex finisher_lock; ///< Protects access to queues and finisher_running.
ceph::condition_variable finisher_cond; ///< Signaled when there is something to process.
ceph::condition_variable finisher_empty_cond; ///< Signaled when the finisher has nothing more to process.
bool finisher_stop; ///< Set when the finisher should stop.
bool finisher_running; ///< True when the finisher is currently executing contexts.
  bool finisher_empty_wait;    ///< True when someone is waiting for the finisher to become empty.
/// Queue for contexts for which complete(0) will be called.
std::vector<std::pair<Context*,int>> finisher_queue;
std::vector<std::pair<Context*,int>> in_progress_queue;
std::string thread_name;
/// Performance counter for the finisher's queue length.
/// Only active for named finishers.
PerfCounters *logger;
void *finisher_thread_entry();
struct FinisherThread : public Thread {
Finisher *fin;
explicit FinisherThread(Finisher *f) : fin(f) {}
void* entry() override { return fin->finisher_thread_entry(); }
} finisher_thread;
public:
/// Add a context to complete, optionally specifying a parameter for the complete function.
void queue(Context *c, int r = 0) {
std::unique_lock ul(finisher_lock);
bool was_empty = finisher_queue.empty();
finisher_queue.push_back(std::make_pair(c, r));
if (was_empty) {
finisher_cond.notify_one();
}
if (logger)
logger->inc(l_finisher_queue_len);
}
void queue(std::list<Context*>& ls) {
{
std::unique_lock ul(finisher_lock);
if (finisher_queue.empty()) {
finisher_cond.notify_all();
}
for (auto i : ls) {
finisher_queue.push_back(std::make_pair(i, 0));
}
if (logger)
logger->inc(l_finisher_queue_len, ls.size());
}
ls.clear();
}
void queue(std::deque<Context*>& ls) {
{
std::unique_lock ul(finisher_lock);
if (finisher_queue.empty()) {
finisher_cond.notify_all();
}
for (auto i : ls) {
finisher_queue.push_back(std::make_pair(i, 0));
}
if (logger)
logger->inc(l_finisher_queue_len, ls.size());
}
ls.clear();
}
void queue(std::vector<Context*>& ls) {
{
std::unique_lock ul(finisher_lock);
if (finisher_queue.empty()) {
finisher_cond.notify_all();
}
for (auto i : ls) {
finisher_queue.push_back(std::make_pair(i, 0));
}
if (logger)
logger->inc(l_finisher_queue_len, ls.size());
}
ls.clear();
}
/// Start the worker thread.
void start();
/** @brief Stop the worker thread.
*
* Does not wait until all outstanding contexts are completed.
* To ensure that everything finishes, you should first shut down
* all sources that can add contexts to this finisher and call
* wait_for_empty() before calling stop(). */
void stop();
/** @brief Blocks until the finisher has nothing left to process.
* This function will also return when a concurrent call to stop()
* finishes, but this class should never be used in this way. */
void wait_for_empty();
/// Construct an anonymous Finisher.
/// Anonymous finishers do not log their queue length.
explicit Finisher(CephContext *cct_) :
cct(cct_), finisher_lock(ceph::make_mutex("Finisher::finisher_lock")),
finisher_stop(false), finisher_running(false), finisher_empty_wait(false),
thread_name("fn_anonymous"), logger(0),
finisher_thread(this) {}
/// Construct a named Finisher that logs its queue length.
Finisher(CephContext *cct_, std::string name, std::string tn) :
cct(cct_), finisher_lock(ceph::make_mutex("Finisher::" + name)),
finisher_stop(false), finisher_running(false), finisher_empty_wait(false),
thread_name(tn), logger(0),
finisher_thread(this) {
PerfCountersBuilder b(cct, std::string("finisher-") + name,
l_finisher_first, l_finisher_last);
b.add_u64(l_finisher_queue_len, "queue_len");
b.add_time_avg(l_finisher_complete_lat, "complete_latency");
logger = b.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
logger->set(l_finisher_queue_len, 0);
logger->set(l_finisher_complete_lat, 0);
}
~Finisher() {
if (logger && cct) {
cct->get_perfcounters_collection()->remove(logger);
delete logger;
}
}
};
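// Usage sketch: queueing a callback for asynchronous completion. The Context
// subclass is a stand-in for whatever callback the caller already has, and
// cct is the daemon's CephContext; only the Finisher calls are from this header.
//
//   struct C_Log : public Context {
//     void finish(int r) override { /* runs in the finisher thread */ }
//   };
//
//   Finisher fin(cct);        // or Finisher(cct, "name", "thread_name")
//   fin.start();
//   fin.queue(new C_Log, 0);  // completed asynchronously with r == 0
//   fin.wait_for_empty();
//   fin.stop();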
/// Context that is completed asynchronously on the supplied finisher.
class C_OnFinisher : public Context {
Context *con;
Finisher *fin;
public:
C_OnFinisher(Context *c, Finisher *f) : con(c), fin(f) {
ceph_assert(fin != NULL);
ceph_assert(con != NULL);
}
~C_OnFinisher() override {
if (con != nullptr) {
delete con;
con = nullptr;
}
}
void finish(int r) override {
fin->queue(con, r);
con = nullptr;
}
};
class ContextQueue {
std::list<Context *> q;
std::mutex q_mutex;
ceph::mutex& mutex;
ceph::condition_variable& cond;
std::atomic_bool q_empty = true;
public:
ContextQueue(ceph::mutex& mut,
ceph::condition_variable& con)
: mutex(mut), cond(con) {}
void queue(std::list<Context *>& ls) {
bool was_empty = false;
{
std::scoped_lock l(q_mutex);
if (q.empty()) {
q.swap(ls);
was_empty = true;
} else {
q.insert(q.end(), ls.begin(), ls.end());
}
q_empty = q.empty();
}
if (was_empty) {
std::scoped_lock l{mutex};
cond.notify_all();
}
ls.clear();
}
void move_to(std::list<Context *>& ls) {
ls.clear();
std::scoped_lock l(q_mutex);
if (!q.empty()) {
q.swap(ls);
}
q_empty = true;
}
bool empty() {
return q_empty;
}
};
#endif
| 6,719 | 27 | 108 | h |
null | ceph-main/src/common/FixedCDC.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "CDC.h"
class FixedCDC : public CDC {
private:
size_t chunk_size;
public:
FixedCDC(int target = 18, int window_bits = 0) {
set_target_bits(target, window_bits);
};
void set_target_bits(int target, int window_bits) override {
chunk_size = 1ul << target;
}
void calc_chunks(
const bufferlist& bl,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const override;
};
| 519 | 20.666667 | 71 | h |
null | ceph-main/src/common/Formatter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_FORMATTER_H
#define CEPH_FORMATTER_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include <deque>
#include <list>
#include <memory>
#include <vector>
#include <stdarg.h>
#include <sstream>
#include <map>
namespace ceph {
struct FormatterAttrs {
std::list< std::pair<std::string, std::string> > attrs;
FormatterAttrs(const char *attr, ...);
};
class Formatter {
public:
class ObjectSection {
Formatter& formatter;
public:
ObjectSection(Formatter& f, std::string_view name) : formatter(f) {
formatter.open_object_section(name);
}
ObjectSection(Formatter& f, std::string_view name, const char *ns) : formatter(f) {
formatter.open_object_section_in_ns(name, ns);
}
~ObjectSection() {
formatter.close_section();
}
};
class ArraySection {
Formatter& formatter;
public:
ArraySection(Formatter& f, std::string_view name) : formatter(f) {
formatter.open_array_section(name);
}
ArraySection(Formatter& f, std::string_view name, const char *ns) : formatter(f) {
formatter.open_array_section_in_ns(name, ns);
}
~ArraySection() {
formatter.close_section();
}
};
static Formatter *create(std::string_view type,
std::string_view default_type,
std::string_view fallback);
static Formatter *create(std::string_view type,
std::string_view default_type) {
return create(type, default_type, "");
}
static Formatter *create(std::string_view type) {
return create(type, "json-pretty", "");
}
template <typename... Params>
static std::unique_ptr<Formatter> create_unique(Params &&...params)
{
return std::unique_ptr<Formatter>(
Formatter::create(std::forward<Params>(params)...));
}
Formatter();
virtual ~Formatter();
virtual void enable_line_break() = 0;
virtual void flush(std::ostream& os) = 0;
void flush(bufferlist &bl);
virtual void reset() = 0;
virtual void set_status(int status, const char* status_name) = 0;
virtual void output_header() = 0;
virtual void output_footer() = 0;
virtual void open_array_section(std::string_view name) = 0;
virtual void open_array_section_in_ns(std::string_view name, const char *ns) = 0;
virtual void open_object_section(std::string_view name) = 0;
virtual void open_object_section_in_ns(std::string_view name, const char *ns) = 0;
virtual void close_section() = 0;
virtual void dump_unsigned(std::string_view name, uint64_t u) = 0;
virtual void dump_int(std::string_view name, int64_t s) = 0;
virtual void dump_float(std::string_view name, double d) = 0;
virtual void dump_string(std::string_view name, std::string_view s) = 0;
virtual void dump_bool(std::string_view name, bool b)
{
dump_format_unquoted(name, "%s", (b ? "true" : "false"));
}
template<typename T>
void dump_object(std::string_view name, const T& foo) {
open_object_section(name);
foo.dump(this);
close_section();
}
virtual std::ostream& dump_stream(std::string_view name) = 0;
virtual void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) = 0;
virtual void dump_format(std::string_view name, const char *fmt, ...);
virtual void dump_format_ns(std::string_view name, const char *ns, const char *fmt, ...);
virtual void dump_format_unquoted(std::string_view name, const char *fmt, ...);
virtual int get_len() const = 0;
virtual void write_raw_data(const char *data) = 0;
/* with attrs */
virtual void open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_array_section(name);
}
virtual void open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_object_section(name);
}
virtual void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs)
{
dump_string(name, s);
}
virtual void *get_external_feature_handler(const std::string& feature) {
return nullptr;
}
virtual void write_bin_data(const char* buff, int buf_len);
};
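// Usage sketch: emitting a small JSON document through the generic Formatter
// interface declared above (the JSONFormatter implementation is declared
// below); writing to std::cout is just for illustration.
//
//   auto f = Formatter::create_unique("json-pretty");
//   f->open_object_section("status");
//   f->dump_string("state", "active");
//   f->dump_int("num_objects", 42);
//   f->close_section();
//   f->flush(std::cout);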
class copyable_sstream : public std::stringstream {
public:
copyable_sstream() {}
copyable_sstream(const copyable_sstream& rhs) {
str(rhs.str());
}
copyable_sstream& operator=(const copyable_sstream& rhs) {
str(rhs.str());
return *this;
}
};
class JSONFormatter : public Formatter {
public:
explicit JSONFormatter(bool p = false);
void set_status(int status, const char* status_name) override {};
void output_header() override {};
void output_footer() override {};
void enable_line_break() override { m_line_break_enabled = true; }
void flush(std::ostream& os) override;
using Formatter::flush; // don't hide Formatter::flush(bufferlist &bl)
void reset() override;
void open_array_section(std::string_view name) override;
void open_array_section_in_ns(std::string_view name, const char *ns) override;
void open_object_section(std::string_view name) override;
void open_object_section_in_ns(std::string_view name, const char *ns) override;
void close_section() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t s) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
std::ostream& dump_stream(std::string_view name) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
int get_len() const override;
void write_raw_data(const char *data) override;
protected:
virtual bool handle_value(std::string_view name, std::string_view s, bool quoted) {
return false; /* is handling done? */
}
virtual bool handle_open_section(std::string_view name, const char *ns, bool is_array) {
return false; /* is handling done? */
}
virtual bool handle_close_section() {
return false; /* is handling done? */
}
int stack_size() { return m_stack.size(); }
private:
struct json_formatter_stack_entry_d {
int size;
bool is_array;
json_formatter_stack_entry_d() : size(0), is_array(false) { }
};
bool m_pretty;
void open_section(std::string_view name, const char *ns, bool is_array);
void print_quoted_string(std::string_view s);
void print_name(std::string_view name);
void print_comma(json_formatter_stack_entry_d& entry);
void finish_pending_string();
template <class T>
void add_value(std::string_view name, T val);
void add_value(std::string_view name, std::string_view val, bool quoted);
copyable_sstream m_ss;
copyable_sstream m_pending_string;
std::string m_pending_name;
std::list<json_formatter_stack_entry_d> m_stack;
bool m_is_pending_string;
bool m_line_break_enabled = false;
};
template <class T>
void add_value(std::string_view name, T val);
class XMLFormatter : public Formatter {
public:
static const char *XML_1_DTD;
XMLFormatter(bool pretty = false, bool lowercased = false, bool underscored = true);
void set_status(int status, const char* status_name) override {}
void output_header() override;
void output_footer() override;
void enable_line_break() override { m_line_break_enabled = true; }
void flush(std::ostream& os) override;
using Formatter::flush; // don't hide Formatter::flush(bufferlist &bl)
void reset() override;
void open_array_section(std::string_view name) override;
void open_array_section_in_ns(std::string_view name, const char *ns) override;
void open_object_section(std::string_view name) override;
void open_object_section_in_ns(std::string_view name, const char *ns) override;
void close_section() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t s) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
std::ostream& dump_stream(std::string_view name) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
int get_len() const override;
void write_raw_data(const char *data) override;
void write_bin_data(const char* buff, int len) override;
/* with attrs */
void open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs) override;
protected:
void open_section_in_ns(std::string_view name, const char *ns, const FormatterAttrs *attrs);
void finish_pending_string();
void print_spaces();
void get_attrs_str(const FormatterAttrs *attrs, std::string& attrs_str);
char to_lower_underscore(char c) const;
std::stringstream m_ss, m_pending_string;
std::deque<std::string> m_sections;
const bool m_pretty;
const bool m_lowercased;
const bool m_underscored;
std::string m_pending_string_name;
bool m_header_done;
bool m_line_break_enabled = false;
private:
template <class T>
void add_value(std::string_view name, T val);
};
class TableFormatter : public Formatter {
public:
explicit TableFormatter(bool keyval = false);
void set_status(int status, const char* status_name) override {};
void output_header() override {};
void output_footer() override {};
void enable_line_break() override {};
void flush(std::ostream& os) override;
using Formatter::flush; // don't hide Formatter::flush(bufferlist &bl)
void reset() override;
void open_array_section(std::string_view name) override;
void open_array_section_in_ns(std::string_view name, const char *ns) override;
void open_object_section(std::string_view name) override;
void open_object_section_in_ns(std::string_view name, const char *ns) override;
void open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void close_section() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t s) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs) override;
std::ostream& dump_stream(std::string_view name) override;
int get_len() const override;
void write_raw_data(const char *data) override;
void get_attrs_str(const FormatterAttrs *attrs, std::string& attrs_str);
private:
template <class T>
void add_value(std::string_view name, T val);
void open_section_in_ns(std::string_view name, const char *ns, const FormatterAttrs *attrs);
std::vector< std::vector<std::pair<std::string, std::string> > > m_vec;
std::stringstream m_ss;
size_t m_vec_index(std::string_view name);
std::string get_section_name(std::string_view name);
void finish_pending_string();
std::string m_pending_name;
bool m_keyval;
int m_section_open;
std::vector< std::string > m_section;
std::map<std::string, int> m_section_cnt;
std::vector<size_t> m_column_size;
std::vector< std::string > m_column_name;
};
std::string fixed_to_string(int64_t num, int scale);
std::string fixed_u_to_string(uint64_t num, int scale);
}
#endif
| 12,259 | 37.074534 | 117 | h |
null | ceph-main/src/common/Graylog.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_LOG_GRAYLOG_H
#define __CEPH_LOG_GRAYLOG_H
#include <boost/asio.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include "include/ceph_assert.h" // boost clobbers this
struct uuid_d;
class LogEntry;
namespace ceph {
class Formatter;
namespace logging {
class Entry;
class SubsystemMap;
// Graylog logging backend: Convert log datastructures (LogEntry, Entry) to
// GELF (http://www.graylog2.org/resources/gelf/specification) and send it
// to a GELF UDP receiver
class Graylog
{
public:
/**
* Create Graylog with SubsystemMap. log_entry will resolve the subsystem
   * id to a string. Logging will not be ready until set_destination is called.
* @param s SubsystemMap
* @param logger Value for key "_logger" in GELF
*/
Graylog(const SubsystemMap * const s, const std::string &logger);
/**
* Create Graylog without SubsystemMap. Logging will not be ready
   * until set_destination is called.
* @param logger Value for key "_logger" in GELF
*/
explicit Graylog(const std::string &logger);
virtual ~Graylog();
void set_hostname(const std::string& host);
void set_fsid(const uuid_d& fsid);
void set_destination(const std::string& host, int port);
void log_entry(const Entry& e);
void log_log_entry(LogEntry const * const e);
typedef std::shared_ptr<Graylog> Ref;
private:
SubsystemMap const * const m_subs;
bool m_log_dst_valid = false;
std::string m_hostname;
std::string m_fsid;
std::string m_logger;
boost::asio::ip::udp::endpoint m_endpoint;
boost::asio::io_service m_io_service;
std::unique_ptr<Formatter> m_formatter;
std::unique_ptr<Formatter> m_formatter_section;
std::stringstream m_ostream_section;
std::stringstream m_ostream_compressed;
boost::iostreams::filtering_ostream m_ostream;
boost::iostreams::zlib_compressor m_compressor;
};
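// Usage sketch: a Graylog sink is inert until a destination is set. The
// logger name, host name and port below are placeholders.
//
//   Graylog gl("rgw");                      // value of the "_logger" GELF field
//   gl.set_hostname("myhost");
//   gl.set_destination("graylog.example.com", 12201);
//   // gl.log_entry(entry) / gl.log_log_entry(&log_entry) may then be used
//   // with the corresponding log structures.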
}
}
#endif
| 2,008 | 22.916667 | 76 | h |
null | ceph-main/src/common/HTMLFormatter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_HTML_FORMATTER_H
#define CEPH_HTML_FORMATTER_H
#include "Formatter.h"
namespace ceph {
class HTMLFormatter : public XMLFormatter {
public:
explicit HTMLFormatter(bool pretty = false);
~HTMLFormatter() override;
void reset() override;
void set_status(int status, const char* status_name) override;
void output_header() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t u) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
std::ostream& dump_stream(std::string_view name) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
/* with attrs */
void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs) override;
private:
template <typename T> void dump_template(std::string_view name, T arg);
int m_status;
const char* m_status_name;
};
}
#endif
| 1,201 | 31.486486 | 114 | h |
null | ceph-main/src/common/HeartbeatMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_HEARTBEATMAP_H
#define CEPH_HEARTBEATMAP_H
#include <list>
#include <atomic>
#include <string>
#include <pthread.h>
#include "common/ceph_time.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
namespace ceph {
/*
* HeartbeatMap -
*
* Maintain a set of handles for internal subsystems to periodically
* check in with a health check and timeout. Each user can register
* and get a handle they can use to set or reset a timeout.
*
* A simple is_healthy() method checks for any users who are not within
* their grace period for a heartbeat.
*/
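// Usage sketch for the classes declared below: a worker registers a handle,
// refreshes it periodically, and the owner polls overall health. cct and the
// grace periods are illustrative.
//
//   HeartbeatMap hbmap(cct);
//   heartbeat_handle_d *h = hbmap.add_worker("my_thread", pthread_self());
//   hbmap.reset_timeout(h, ceph::make_timespan(15), ceph::make_timespan(150));
//   // ... worker makes progress, calling reset_timeout() regularly ...
//   bool ok = hbmap.is_healthy();
//   hbmap.remove_worker(h);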
struct heartbeat_handle_d {
const std::string name;
pthread_t thread_id = 0;
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
std::atomic<time> timeout = clock::zero();
std::atomic<time> suicide_timeout = clock::zero();
ceph::timespan grace = ceph::timespan::zero();
ceph::timespan suicide_grace = ceph::timespan::zero();
std::list<heartbeat_handle_d*>::iterator list_item;
explicit heartbeat_handle_d(const std::string& n)
: name(n)
{ }
};
class HeartbeatMap {
public:
// register/unregister
heartbeat_handle_d *add_worker(const std::string& name, pthread_t thread_id);
void remove_worker(const heartbeat_handle_d *h);
// reset the timeout so that it expects another touch within grace amount of time
void reset_timeout(heartbeat_handle_d *h,
ceph::timespan grace,
ceph::timespan suicide_grace);
// clear the timeout so that it's not checked on
void clear_timeout(heartbeat_handle_d *h);
// return false if any of the timeouts are currently expired.
bool is_healthy();
// touch cct->_conf->heartbeat_file if is_healthy()
void check_touch_file();
// get the number of unhealthy workers
int get_unhealthy_workers() const;
// get the number of total workers
int get_total_workers() const;
explicit HeartbeatMap(CephContext *cct);
~HeartbeatMap();
private:
using clock = ceph::coarse_mono_clock;
CephContext *m_cct;
ceph::shared_mutex m_rwlock =
ceph::make_shared_mutex("HeartbeatMap::m_rwlock");
clock::time_point m_inject_unhealthy_until;
std::list<heartbeat_handle_d*> m_workers;
std::atomic<unsigned> m_unhealthy_workers = { 0 };
std::atomic<unsigned> m_total_workers = { 0 };
bool _check(const heartbeat_handle_d *h, const char *who,
ceph::coarse_mono_time now);
};
}
#endif
| 2,828 | 27.29 | 83 | h |
null | ceph-main/src/common/Initialize.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Copyright (c) 2011 Stanford University
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CEPH_INITIALIZE_H
#define CEPH_INITIALIZE_H
/**
* This class is used to manage once-only initialization that should occur
* before main() is invoked, such as the creation of static variables. It
* also provides a mechanism for handling dependencies (where one class
* needs to perform its once-only initialization before another).
*
* The simplest way to use an Initialize object is to define a static
* initialization method for a class, say Foo::init(). Then, declare
* a static Initialize object in the class:
* "static Initialize initializer(Foo::init);".
* The result is that Foo::init will be invoked when the object is
* constructed (before main() is invoked). Foo::init can create static
* objects and perform any other once-only initialization needed by the
* class. Furthermore, if some other class needs to ensure that Foo has
* been initialized (e.g. as part of its own initialization) it can invoke
* Foo::init directly (Foo::init should contain an internal guard so that
* it only performs its functions once, even if invoked several times).
*
* There is also a second form of constructor for Initialize that causes a
* new object to be dynamically allocated and assigned to a pointer, instead
* of invoking a function. This form allows for the creation of static objects
* that are never destructed (thereby avoiding issues with the order of
* destruction).
*/
class Initialize {
public:
/**
* This form of constructor causes its function argument to be invoked
* when the object is constructed. When used with a static Initialize
* object, this will cause \p func to run before main() runs, so that
* \p func can perform once-only initialization.
*
* \param func
* This function is invoked with no arguments when the object is
* constructed. Typically the function will create static
* objects and/or invoke other initialization functions. The
* function should normally contain an internal guard so that it
* only performs its initialization the first time it is invoked.
*/
explicit Initialize(void (*func)()) {
(*func)();
}
/**
* This form of constructor causes a new object of a particular class
* to be constructed with a no-argument constructor and assigned to a
* given pointer. This form is typically used with a static Initialize
* object: the result is that the object will be created and assigned
* to the pointer before main() runs.
*
* \param p
* Pointer to an object of any type. If the pointer is NULL then
* it is replaced with a pointer to a newly allocated object of
* the given type.
*/
template<typename T>
explicit Initialize(T*& p) {
if (p == NULL) {
p = new T;
}
}
};
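// Sketch of the pattern described above: Foo::init() guards itself so it runs
// only once, and a static Initialize object triggers it before main(). Foo and
// its contents are placeholders.
//
//   class Foo {
//   public:
//     static void init() {
//       static bool initialized = false;
//       if (initialized)
//         return;
//       initialized = true;
//       // create static objects, call dependencies' init(), etc.
//     }
//   };
//   static Initialize foo_initializer(Foo::init);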
#endif // CEPH_INITIALIZE_H
| 4,046 | 40.721649 | 78 | h |
null | ceph-main/src/common/LRUSet.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <functional>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/unordered_set.hpp>
#include "include/encoding.h"
/// Combination of an LRU with fast hash-based membership lookup
template<class T, int NUM_BUCKETS=128>
class LRUSet {
/// internal node
struct Node
: boost::intrusive::unordered_set_base_hook<> {
// actual payload
T value;
// for the lru
boost::intrusive::list_member_hook<> lru_item;
Node(const T& v) : value(v) {}
friend std::size_t hash_value(const Node &node) {
return std::hash<T>{}(node.value);
}
friend bool operator<(const Node &a, const Node &b) {
return a.value < b.value;
}
friend bool operator>(const Node &a, const Node &b) {
return a.value > b.value;
}
friend bool operator==(const Node &a, const Node &b) {
return a.value == b.value;
}
};
struct NodeDeleteDisposer {
void operator()(Node *n) { delete n; }
};
// lru
boost::intrusive::list<
Node,
boost::intrusive::member_hook<Node,
boost::intrusive::list_member_hook<>,
&Node::lru_item>
> lru;
// hash-based set
typename boost::intrusive::unordered_set<Node>::bucket_type base_buckets[NUM_BUCKETS];
boost::intrusive::unordered_set<Node> set;
public:
LRUSet()
: set(typename boost::intrusive::unordered_set<Node>::bucket_traits(base_buckets,
NUM_BUCKETS))
{}
~LRUSet() {
clear();
}
LRUSet(const LRUSet& other)
: set(typename boost::intrusive::unordered_set<Node>::bucket_traits(base_buckets,
NUM_BUCKETS)) {
for (auto & i : other.lru) {
insert(i.value);
}
}
const LRUSet& operator=(const LRUSet& other) {
clear();
for (auto& i : other.lru) {
insert(i.value);
}
return *this;
}
size_t size() const {
return set.size();
}
bool empty() const {
return set.empty();
}
bool contains(const T& item) const {
return set.count(item) > 0;
}
void clear() {
prune(0);
}
void insert(const T& item) {
erase(item);
Node *n = new Node(item);
lru.push_back(*n);
set.insert(*n);
}
bool erase(const T& item) {
auto p = set.find(item);
if (p == set.end()) {
return false;
}
lru.erase(lru.iterator_to(*p));
set.erase_and_dispose(p, NodeDeleteDisposer());
return true;
}
void prune(size_t max) {
while (set.size() > max) {
auto p = lru.begin();
set.erase(*p);
lru.erase_and_dispose(p, NodeDeleteDisposer());
}
}
void encode(bufferlist& bl) const {
using ceph::encode;
ENCODE_START(1, 1, bl);
uint32_t n = set.size();
encode(n, bl);
auto p = set.begin();
while (n--) {
encode(p->value, bl);
++p;
}
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& p) {
using ceph::decode;
DECODE_START(1, p);
uint32_t n;
decode(n, p);
while (n--) {
T v;
decode(v, p);
insert(v);
}
DECODE_FINISH(p);
}
};
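// Usage sketch: membership tracking with an LRU-bounded size. insert() of an
// existing value moves it to the most-recently-used end, and prune() drops the
// oldest entries beyond the given limit.
//
//   LRUSet<std::string> seen;
//   seen.insert("a");
//   seen.insert("b");
//   bool have_a = seen.contains("a");  // true
//   seen.prune(1);                     // keeps only the most recent ("b")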
| 3,133 | 20.465753 | 88 | h |
null | ceph-main/src/common/LogClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LOGCLIENT_H
#define CEPH_LOGCLIENT_H
#include <atomic>
#include "common/LogEntry.h"
#include "common/ceph_mutex.h"
#include "common/ostream_temp.h"
#include "common/ref.h"
#include "include/health.h"
class LogClient;
class MLog;
class MLogAck;
class Messenger;
class MonMap;
class Message;
struct uuid_d;
struct Connection;
class LogChannel;
namespace ceph {
namespace logging {
class Graylog;
}
}
struct clog_targets_conf_t {
std::string log_to_monitors;
std::string log_to_syslog;
std::string log_channels;
std::string log_prios;
std::string log_to_graylog;
std::string log_to_graylog_host;
std::string log_to_graylog_port;
uuid_d fsid; // only 16B. Simpler as a copy.
std::string host;
};
/** Manage where we output to and at which priority
*
* Not to be confused with the LogClient, which is the almighty coordinator
* of channels. We just deal with the boring part of the logging: send to
* syslog, send to file, generate LogEntry and queue it for the LogClient.
*
* Past queueing the LogEntry, the LogChannel is done with the whole thing.
* LogClient will deal with sending and handling of LogEntries.
*/
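// Usage sketch for the two classes declared below: channels are obtained from
// a LogClient and written to with the severity helpers. The messenger and
// monmap arguments are whatever the daemon already has.
//
//   LogClient log_client(cct, messenger, &monmap, LogClient::FLAG_MON);
//   LogChannelRef clog = log_client.create_channel();
//   clog->info() << "orderly shutdown initiated";
//   clog->error() << "something went wrong";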
class LogChannel : public LoggerSinkSet
{
public:
LogChannel(CephContext *cct, LogClient *lc, const std::string &channel);
LogChannel(CephContext *cct, LogClient *lc,
const std::string &channel,
const std::string &facility,
const std::string &prio);
OstreamTemp debug() final {
return OstreamTemp(CLOG_DEBUG, this);
}
void debug(std::stringstream &s) final {
do_log(CLOG_DEBUG, s);
}
/**
* Convenience function mapping health status to
* the appropriate cluster log severity.
*/
OstreamTemp health(health_status_t health) {
switch(health) {
case HEALTH_OK:
return info();
case HEALTH_WARN:
return warn();
case HEALTH_ERR:
return error();
default:
// Invalid health_status_t value
ceph_abort();
}
}
OstreamTemp info() final {
return OstreamTemp(CLOG_INFO, this);
}
void info(std::stringstream &s) final {
do_log(CLOG_INFO, s);
}
OstreamTemp warn() final {
return OstreamTemp(CLOG_WARN, this);
}
void warn(std::stringstream &s) final {
do_log(CLOG_WARN, s);
}
OstreamTemp error() final {
return OstreamTemp(CLOG_ERROR, this);
}
void error(std::stringstream &s) final {
do_log(CLOG_ERROR, s);
}
OstreamTemp sec() final {
return OstreamTemp(CLOG_SEC, this);
}
void sec(std::stringstream &s) final {
do_log(CLOG_SEC, s);
}
void set_log_to_monitors(bool v);
void set_log_to_syslog(bool v) {
log_to_syslog = v;
}
void set_log_channel(const std::string& v) {
log_channel = v;
}
void set_log_prio(const std::string& v) {
log_prio = v;
}
void set_syslog_facility(const std::string& v) {
syslog_facility = v;
}
std::string get_log_prio() { return log_prio; }
std::string get_log_channel() { return log_channel; }
std::string get_syslog_facility() { return syslog_facility; }
bool must_log_to_syslog() { return log_to_syslog; }
/**
* Do we want to log to syslog?
*
* @return true if log_to_syslog is true and both channel and prio
* are not empty; false otherwise.
*/
bool do_log_to_syslog() {
return must_log_to_syslog() &&
!log_prio.empty() && !log_channel.empty();
}
bool must_log_to_monitors() { return log_to_monitors; }
bool do_log_to_graylog() {
return (graylog != nullptr);
}
typedef std::shared_ptr<LogChannel> Ref;
/**
* Query the configuration database in conf_cct for configuration
* parameters. Pick out the relevant values based on our channel name.
* Update the logger configuration based on these values.
*
* Return a collection of configuration strings.
*/
clog_targets_conf_t parse_client_options(CephContext* conf_cct);
void do_log(clog_type prio, std::stringstream& ss) final;
void do_log(clog_type prio, const std::string& s) final;
private:
CephContext *cct;
LogClient *parent;
ceph::mutex channel_lock = ceph::make_mutex("LogChannel::channel_lock");
std::string log_channel;
std::string log_prio;
std::string syslog_facility;
bool log_to_syslog;
bool log_to_monitors;
std::shared_ptr<ceph::logging::Graylog> graylog;
/**
* update config values from parsed k/v std::map for each config option
*/
void update_config(const clog_targets_conf_t& conf_strings);
clog_targets_conf_t parse_log_client_options(CephContext* conf_cct);
};
typedef LogChannel::Ref LogChannelRef;
class LogClient
{
public:
enum logclient_flag_t {
NO_FLAGS = 0,
FLAG_MON = 0x1,
};
LogClient(CephContext *cct, Messenger *m, MonMap *mm,
logclient_flag_t flags);
virtual ~LogClient() {
channels.clear();
}
bool handle_log_ack(MLogAck *m);
ceph::ref_t<Message> get_mon_log_message(bool flush);
bool are_pending();
LogChannelRef create_channel() {
return create_channel(CLOG_CHANNEL_DEFAULT);
}
LogChannelRef create_channel(const std::string& name) {
LogChannelRef c;
if (channels.count(name))
c = channels[name];
else {
c = std::make_shared<LogChannel>(cct, this, name);
channels[name] = c;
}
return c;
}
void destroy_channel(const std::string& name) {
if (channels.count(name))
channels.erase(name);
}
void shutdown() {
channels.clear();
}
uint64_t get_next_seq();
entity_addrvec_t get_myaddrs();
const EntityName& get_myname();
entity_name_t get_myrank();
version_t queue(LogEntry &entry);
void reset();
private:
ceph::ref_t<Message> _get_mon_log_message();
void _send_to_mon();
CephContext *cct;
Messenger *messenger;
MonMap *monmap;
bool is_mon;
ceph::mutex log_lock = ceph::make_mutex("LogClient::log_lock");
version_t last_log_sent;
version_t last_log;
std::deque<LogEntry> log_queue;
std::map<std::string, LogChannelRef> channels;
};
#endif
| 6,440 | 24.160156 | 75 | h |
null | ceph-main/src/common/LogEntry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LOGENTRY_H
#define CEPH_LOGENTRY_H
#include <fmt/format.h>
#include "include/utime.h"
#include "msg/msg_fmt.h"
#include "msg/msg_types.h"
#include "common/entity_name.h"
#include "ostream_temp.h"
#include "LRUSet.h"
namespace ceph {
class Formatter;
}
static const std::string CLOG_CHANNEL_NONE = "none";
static const std::string CLOG_CHANNEL_DEFAULT = "cluster";
static const std::string CLOG_CHANNEL_CLUSTER = "cluster";
static const std::string CLOG_CHANNEL_AUDIT = "audit";
// this is the key name used in the config options for the default, e.g.
// default=true foo=false bar=false
static const std::string CLOG_CONFIG_DEFAULT_KEY = "default";
/*
* Given a clog log_type, return the equivalent syslog priority
*/
int clog_type_to_syslog_level(clog_type t);
clog_type string_to_clog_type(const std::string& s);
int string_to_syslog_level(std::string s);
int string_to_syslog_facility(std::string s);
std::string clog_type_to_string(clog_type t);
struct LogEntryKey {
private:
uint64_t _hash = 0;
void _calc_hash() {
std::hash<entity_name_t> h;
_hash = seq + h(rank);
}
entity_name_t rank;
utime_t stamp;
uint64_t seq = 0;
public:
LogEntryKey() {}
LogEntryKey(const entity_name_t& w, utime_t t, uint64_t s)
: rank(w), stamp(t), seq(s) {
_calc_hash();
}
uint64_t get_hash() const {
return _hash;
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<LogEntryKey*>& o);
friend bool operator==(const LogEntryKey& l, const LogEntryKey& r) {
return l.rank == r.rank && l.stamp == r.stamp && l.seq == r.seq;
}
void encode(bufferlist& bl) const {
using ceph::encode;
encode(rank, bl);
encode(stamp, bl);
encode(seq, bl);
}
void decode(bufferlist::const_iterator &p) {
using ceph::decode;
decode(rank, p);
decode(stamp, p);
decode(seq, p);
}
};
WRITE_CLASS_ENCODER(LogEntryKey)
namespace std {
template<> struct hash<LogEntryKey> {
size_t operator()(const LogEntryKey& r) const {
return r.get_hash();
}
};
} // namespace std
struct LogEntry {
EntityName name;
entity_name_t rank;
entity_addrvec_t addrs;
utime_t stamp;
uint64_t seq;
clog_type prio;
std::string msg;
std::string channel;
LogEntry() : seq(0), prio(CLOG_DEBUG) {}
LogEntryKey key() const { return LogEntryKey(rank, stamp, seq); }
void log_to_syslog(std::string level, std::string facility) const;
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<LogEntry*>& o);
static clog_type str_to_level(std::string const &str);
};
WRITE_CLASS_ENCODER_FEATURES(LogEntry)
struct LogSummary {
version_t version;
// ---- pre-quincy ----
// channel -> [(seq#, entry), ...]
std::map<std::string,std::list<std::pair<uint64_t,LogEntry>>> tail_by_channel;
uint64_t seq = 0;
ceph::unordered_set<LogEntryKey> keys;
// ---- quincy+ ----
LRUSet<LogEntryKey> recent_keys;
std::map<std::string, std::pair<uint64_t,uint64_t>> channel_info; // channel -> [begin, end)
LogSummary() : version(0) {}
void build_ordered_tail_legacy(std::list<LogEntry> *tail) const;
void add_legacy(const LogEntry& e) {
keys.insert(e.key());
tail_by_channel[e.channel].push_back(std::make_pair(++seq, e));
}
void prune(size_t max) {
for (auto& i : tail_by_channel) {
while (i.second.size() > max) {
keys.erase(i.second.front().second.key());
i.second.pop_front();
}
}
recent_keys.prune(max);
}
bool contains(const LogEntryKey& k) const {
return keys.count(k) || recent_keys.contains(k);
}
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<LogSummary*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(LogSummary)
inline std::ostream& operator<<(std::ostream& out, const clog_type t)
{
switch (t) {
case CLOG_DEBUG:
return out << "[DBG]";
case CLOG_INFO:
return out << "[INF]";
case CLOG_SEC:
return out << "[SEC]";
case CLOG_WARN:
return out << "[WRN]";
case CLOG_ERROR:
return out << "[ERR]";
default:
return out << "[???]";
}
}
inline std::ostream& operator<<(std::ostream& out, const LogEntry& e)
{
return out << e.stamp << " " << e.name << " (" << e.rank << ") "
<< e.seq << " : "
<< e.channel << " " << e.prio << " " << e.msg;
}
template <> struct fmt::formatter<EntityName> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const EntityName& e, FormatContext& ctx) {
return formatter<std::string_view>::format(e.to_str(), ctx);
}
};
template <> struct fmt::formatter<LogEntry> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const LogEntry& e, FormatContext& ctx) {
return fmt::format_to(ctx.out(), "{} {} ({}) {} : {} {} {}",
e.stamp, e.name, e.rank, e.seq, e.channel, e.prio, e.msg);
}
};
#endif
| 5,587 | 25.234742 | 94 | h |
null | ceph-main/src/common/OpQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef OP_QUEUE_H
#define OP_QUEUE_H
#include "include/msgr.h"
#include <list>
#include <functional>
namespace ceph {
class Formatter;
}
/**
* Abstract class for all Op Queues
*
* In order to provide optimized code, be sure to declare all
* virtual functions as final in the derived class.
*/
template <typename T, typename K>
class OpQueue {
public:
// Ops of this class should be deleted immediately. If out isn't
// nullptr then items should be added to the front in
// front-to-back order. The typical strategy is to visit items in
// the queue in *reverse* order and to use *push_front* to insert
// them into out.
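  // For example, if the class currently holds items [A, B, C] from front to
  // back, visiting C, B, A and push_front()ing each one leaves out as
  // [A, B, C], preserving the original dispatch order.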
virtual void remove_by_class(K k, std::list<T> *out) = 0;
// Enqueue op in the back of the strict queue
virtual void enqueue_strict(K cl, unsigned priority, T &&item) = 0;
// Enqueue op in the front of the strict queue
virtual void enqueue_strict_front(K cl, unsigned priority, T &&item) = 0;
// Enqueue op in the back of the regular queue
virtual void enqueue(K cl, unsigned priority, unsigned cost, T &&item) = 0;
// Enqueue the op in the front of the regular queue
virtual void enqueue_front(
K cl, unsigned priority, unsigned cost, T &&item) = 0;
// Returns if the queue is empty
virtual bool empty() const = 0;
  // Return an op to be dispatched
virtual T dequeue() = 0;
// Formatted output of the queue
virtual void dump(ceph::Formatter *f) const = 0;
// Human readable brief description of queue and relevant parameters
virtual void print(std::ostream &f) const = 0;
// Don't leak resources on destruction
virtual ~OpQueue() {};
};
#endif
| 2,063 | 26.891892 | 77 | h |
null | ceph-main/src/common/OutputDataSocket.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_OUTPUTDATASOCKET_H
#define CEPH_COMMON_OUTPUTDATASOCKET_H
#include "common/ceph_mutex.h"
#include "common/Thread.h"
#include "include/common_fwd.h"
#include "include/buffer.h"
class OutputDataSocket : public Thread
{
public:
OutputDataSocket(CephContext *cct, uint64_t _backlog);
~OutputDataSocket() override;
bool init(const std::string &path);
void append_output(ceph::buffer::list& bl);
protected:
virtual void init_connection(ceph::buffer::list& bl) {}
void shutdown();
std::string create_shutdown_pipe(int *pipe_rd, int *pipe_wr);
std::string bind_and_listen(const std::string &sock_path, int *fd);
void *entry() override;
bool do_accept();
void handle_connection(int fd);
void close_connection(int fd);
int dump_data(int fd);
CephContext *m_cct;
uint64_t data_max_backlog;
std::string m_path;
int m_sock_fd;
int m_shutdown_rd_fd;
int m_shutdown_wr_fd;
bool going_down;
uint64_t data_size;
uint32_t skipped;
std::vector<ceph::buffer::list> data;
ceph::mutex m_lock = ceph::make_mutex("OutputDataSocket::m_lock");
ceph::condition_variable cond;
ceph::buffer::list delim;
};
#endif
| 1,586 | 22.338235 | 71 | h |
null | ceph-main/src/common/PluginRegistry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_COMMON_PLUGINREGISTRY_H
#define CEPH_COMMON_PLUGINREGISTRY_H
#include <map>
#include <string>
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
extern "C" {
const char *__ceph_plugin_version();
int __ceph_plugin_init(CephContext *cct,
const std::string& type,
const std::string& name);
}
namespace ceph {
class Plugin {
public:
void *library;
CephContext *cct;
explicit Plugin(CephContext *cct) : library(NULL), cct(cct) {}
virtual ~Plugin() {}
};
class PluginRegistry {
public:
CephContext *cct;
ceph::mutex lock = ceph::make_mutex("PluginRegistery::lock");
bool loading;
bool disable_dlclose;
std::map<std::string,std::map<std::string,Plugin*> > plugins;
explicit PluginRegistry(CephContext *cct);
~PluginRegistry();
int add(const std::string& type, const std::string& name,
Plugin *factory);
int remove(const std::string& type, const std::string& name);
Plugin *get(const std::string& type, const std::string& name);
Plugin *get_with_load(const std::string& type, const std::string& name);
int load(const std::string& type,
const std::string& name);
int preload();
int preload(const std::string& type);
};
}
#endif
| 1,844 | 25.73913 | 76 | h |
null | ceph-main/src/common/Preforker.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_COMMON_PREFORKER_H
#define CEPH_COMMON_PREFORKER_H
#include <signal.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sstream>
#include "common/errno.h"
#include "common/safe_io.h"
#include "include/ceph_assert.h"
#include "include/compat.h"
#include "include/sock_compat.h"
/**
* pre-fork fork/daemonize helper class
*
* Hide the details of letting a process fork early, do a bunch of
* initialization work that may spam stdout or exit with an error, and
* then daemonize. The exit() method will either exit directly (if we
* haven't forked) or pass a message to the parent with the error if
* we have.
*/
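// Usage sketch of the flow described above (error handling abbreviated; the
// setup and daemon-work steps are placeholders):
//
//   Preforker forker;
//   std::string err;
//   if (forker.prefork(err) < 0) {
//     std::cerr << err << std::endl;
//     return 1;
//   }
//   if (forker.is_parent()) {
//     return forker.parent_wait(err);  // waits for child to daemonize or fail
//   }
//   // child process: perform setup that may call forker.exit(r) on failure,
//   // then detach from the parent once initialization has succeeded.
//   forker.daemonize();
//   // ... main daemon work ...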
class Preforker {
pid_t childpid;
bool forked;
int fd[2]; // parent's, child's
public:
Preforker()
: childpid(0),
forked(false)
{}
int prefork(std::string &err) {
ceph_assert(!forked);
std::ostringstream oss;
int r = socketpair_cloexec(AF_UNIX, SOCK_STREAM, 0, fd);
if (r < 0) {
int e = errno;
oss << "[" << getpid() << "]: unable to create socketpair: " << cpp_strerror(e);
err = oss.str();
return (errno = e, -1);
}
struct sigaction sa;
sa.sa_handler = SIG_IGN;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
if (sigaction(SIGHUP, &sa, nullptr) != 0) {
int e = errno;
oss << "[" << getpid() << "]: unable to ignore SIGHUP: " << cpp_strerror(e);
err = oss.str();
return (errno = e, -1);
}
forked = true;
childpid = fork();
if (childpid < 0) {
int e = errno;
oss << "[" << getpid() << "]: unable to fork: " << cpp_strerror(e);
err = oss.str();
return (errno = e, -1);
}
if (is_child()) {
::close(fd[0]);
} else {
::close(fd[1]);
}
return 0;
}
int get_signal_fd() const {
return forked ? fd[1] : 0;
}
bool is_child() {
return childpid == 0;
}
bool is_parent() {
return childpid != 0;
}
int parent_wait(std::string &err_msg) {
ceph_assert(forked);
int r = -1;
std::ostringstream oss;
int err = safe_read_exact(fd[0], &r, sizeof(r));
if (err == 0 && r == -1) {
// daemonize
::close(0);
::close(1);
::close(2);
} else if (err) {
oss << "[" << getpid() << "]: " << cpp_strerror(err);
} else {
// wait for child to exit
int status;
err = waitpid(childpid, &status, 0);
if (err < 0) {
oss << "[" << getpid() << "]" << " waitpid error: " << cpp_strerror(err);
} else if (WIFSIGNALED(status)) {
oss << "[" << getpid() << "]" << " exited with a signal";
} else if (!WIFEXITED(status)) {
oss << "[" << getpid() << "]" << " did not exit normally";
} else {
err = WEXITSTATUS(status);
if (err != 0)
oss << "[" << getpid() << "]" << " returned exit_status " << cpp_strerror(err);
}
}
err_msg = oss.str();
return err;
}
int signal_exit(int r) {
if (forked) {
/* If we get an error here, it's too late to do anything reasonable about it. */
[[maybe_unused]] auto n = safe_write(fd[1], &r, sizeof(r));
}
return r;
}
void exit(int r) {
if (is_child())
signal_exit(r);
::exit(r);
}
void daemonize() {
ceph_assert(forked);
static int r = -1;
int r2 = ::write(fd[1], &r, sizeof(r));
r += r2; // make the compiler shut up about the unused return code from ::write(2).
}
};
#endif
| 3,578 | 23.682759 | 88 | h |
null | ceph-main/src/common/PrioritizedQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PRIORITY_QUEUE_H
#define PRIORITY_QUEUE_H
#include "include/ceph_assert.h"
#include "common/Formatter.h"
#include "common/OpQueue.h"
/**
* Manages queue for normal and strict priority items
*
 * On dequeue, the queue will select the lowest priority subqueue
 * whose token bucket holds more tokens than the cost of its front item.
*
* If there is no such queue, we choose the next queue item for
* the highest priority queue.
*
* Before returning a dequeued item, we place into each bucket
* cost * (priority/total_priority) tokens.
*
* enqueue_strict and enqueue_strict_front queue items into queues
* which are serviced in strict priority order before items queued
* with enqueue and enqueue_front
*
* Within a priority class, we schedule round robin based on the class
* of type K used to enqueue items. e.g. you could use entity_inst_t
* to provide fairness for different clients.
*/
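// Usage sketch: a queue of integer "ops" keyed by an unsigned client id. The
// priorities, costs and token-bucket bounds are arbitrary illustrative values.
//
//   PrioritizedQueue<int, unsigned> q(1000 /* max tokens/subqueue */, 10 /* min cost */);
//   q.enqueue(1u /* client */, 63, 100, 42);    // normal priority 63, cost 100
//   q.enqueue_strict(2u /* client */, 127, 7);  // strict: served before normal items
//   int item = q.dequeue();                     // 7 comes out first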
template <typename T, typename K>
class PrioritizedQueue : public OpQueue <T, K> {
int64_t total_priority;
int64_t max_tokens_per_subqueue;
int64_t min_cost;
typedef std::list<std::pair<unsigned, T> > ListPairs;
struct SubQueue {
private:
typedef std::map<K, ListPairs> Classes;
Classes q;
unsigned tokens, max_tokens;
int64_t size;
typename Classes::iterator cur;
public:
SubQueue(const SubQueue &other)
: q(other.q),
tokens(other.tokens),
max_tokens(other.max_tokens),
size(other.size),
cur(q.begin()) {}
SubQueue()
: tokens(0),
max_tokens(0),
size(0), cur(q.begin()) {}
void set_max_tokens(unsigned mt) {
max_tokens = mt;
}
unsigned get_max_tokens() const {
return max_tokens;
}
unsigned num_tokens() const {
return tokens;
}
void put_tokens(unsigned t) {
tokens += t;
if (tokens > max_tokens) {
tokens = max_tokens;
}
}
void take_tokens(unsigned t) {
if (tokens > t) {
tokens -= t;
} else {
tokens = 0;
}
}
void enqueue(K cl, unsigned cost, T &&item) {
q[cl].push_back(std::make_pair(cost, std::move(item)));
if (cur == q.end())
cur = q.begin();
size++;
}
void enqueue_front(K cl, unsigned cost, T &&item) {
q[cl].push_front(std::make_pair(cost, std::move(item)));
if (cur == q.end())
cur = q.begin();
size++;
}
std::pair<unsigned, T> &front() const {
ceph_assert(!(q.empty()));
ceph_assert(cur != q.end());
return cur->second.front();
}
T pop_front() {
ceph_assert(!(q.empty()));
ceph_assert(cur != q.end());
T ret = std::move(cur->second.front().second);
cur->second.pop_front();
if (cur->second.empty()) {
q.erase(cur++);
} else {
++cur;
}
if (cur == q.end()) {
cur = q.begin();
}
size--;
return ret;
}
unsigned length() const {
ceph_assert(size >= 0);
return (unsigned)size;
}
bool empty() const {
return q.empty();
}
void remove_by_class(K k, std::list<T> *out) {
typename Classes::iterator i = q.find(k);
if (i == q.end()) {
return;
}
size -= i->second.size();
if (i == cur) {
++cur;
}
if (out) {
for (typename ListPairs::reverse_iterator j =
i->second.rbegin();
j != i->second.rend();
++j) {
out->push_front(std::move(j->second));
}
}
q.erase(i);
if (cur == q.end()) {
cur = q.begin();
}
}
void dump(ceph::Formatter *f) const {
f->dump_int("tokens", tokens);
f->dump_int("max_tokens", max_tokens);
f->dump_int("size", size);
f->dump_int("num_keys", q.size());
if (!empty()) {
f->dump_int("first_item_cost", front().first);
}
}
};
typedef std::map<unsigned, SubQueue> SubQueues;
SubQueues high_queue;
SubQueues queue;
SubQueue *create_queue(unsigned priority) {
typename SubQueues::iterator p = queue.find(priority);
if (p != queue.end()) {
return &p->second;
}
total_priority += priority;
SubQueue *sq = &queue[priority];
sq->set_max_tokens(max_tokens_per_subqueue);
return sq;
}
void remove_queue(unsigned priority) {
ceph_assert(queue.count(priority));
queue.erase(priority);
total_priority -= priority;
ceph_assert(total_priority >= 0);
}
void distribute_tokens(unsigned cost) {
if (total_priority == 0) {
return;
}
for (typename SubQueues::iterator i = queue.begin();
i != queue.end();
++i) {
i->second.put_tokens(((i->first * cost) / total_priority) + 1);
}
}
public:
PrioritizedQueue(unsigned max_per, unsigned min_c)
: total_priority(0),
max_tokens_per_subqueue(max_per),
min_cost(min_c)
{}
unsigned length() const {
unsigned total = 0;
for (typename SubQueues::const_iterator i = queue.begin();
i != queue.end();
++i) {
ceph_assert(i->second.length());
total += i->second.length();
}
for (typename SubQueues::const_iterator i = high_queue.begin();
i != high_queue.end();
++i) {
ceph_assert(i->second.length());
total += i->second.length();
}
return total;
}
void remove_by_class(K k, std::list<T> *out = 0) final {
for (typename SubQueues::iterator i = queue.begin();
i != queue.end();
) {
i->second.remove_by_class(k, out);
if (i->second.empty()) {
unsigned priority = i->first;
++i;
remove_queue(priority);
} else {
++i;
}
}
for (typename SubQueues::iterator i = high_queue.begin();
i != high_queue.end();
) {
i->second.remove_by_class(k, out);
if (i->second.empty()) {
high_queue.erase(i++);
} else {
++i;
}
}
}
void enqueue_strict(K cl, unsigned priority, T&& item) final {
high_queue[priority].enqueue(cl, 0, std::move(item));
}
void enqueue_strict_front(K cl, unsigned priority, T&& item) final {
high_queue[priority].enqueue_front(cl, 0, std::move(item));
}
void enqueue(K cl, unsigned priority, unsigned cost, T&& item) final {
if (cost < min_cost)
cost = min_cost;
if (cost > max_tokens_per_subqueue)
cost = max_tokens_per_subqueue;
create_queue(priority)->enqueue(cl, cost, std::move(item));
}
void enqueue_front(K cl, unsigned priority, unsigned cost, T&& item) final {
if (cost < min_cost)
cost = min_cost;
if (cost > max_tokens_per_subqueue)
cost = max_tokens_per_subqueue;
create_queue(priority)->enqueue_front(cl, cost, std::move(item));
}
bool empty() const final {
ceph_assert(total_priority >= 0);
ceph_assert((total_priority == 0) || !(queue.empty()));
return queue.empty() && high_queue.empty();
}
T dequeue() final {
ceph_assert(!empty());
if (!(high_queue.empty())) {
T ret = std::move(high_queue.rbegin()->second.front().second);
high_queue.rbegin()->second.pop_front();
if (high_queue.rbegin()->second.empty()) {
high_queue.erase(high_queue.rbegin()->first);
}
return ret;
}
// if there are multiple buckets/subqueues with sufficient tokens,
// we behave like a strict priority queue among all subqueues that
// are eligible to run.
for (typename SubQueues::iterator i = queue.begin();
i != queue.end();
++i) {
ceph_assert(!(i->second.empty()));
if (i->second.front().first < i->second.num_tokens()) {
unsigned cost = i->second.front().first;
i->second.take_tokens(cost);
T ret = std::move(i->second.front().second);
i->second.pop_front();
if (i->second.empty()) {
remove_queue(i->first);
}
distribute_tokens(cost);
return ret;
}
}
// if no subqueues have sufficient tokens, we behave like a strict
// priority queue.
unsigned cost = queue.rbegin()->second.front().first;
T ret = std::move(queue.rbegin()->second.front().second);
queue.rbegin()->second.pop_front();
if (queue.rbegin()->second.empty()) {
remove_queue(queue.rbegin()->first);
}
distribute_tokens(cost);
return ret;
}
void dump(ceph::Formatter *f) const final {
f->dump_int("total_priority", total_priority);
f->dump_int("max_tokens_per_subqueue", max_tokens_per_subqueue);
f->dump_int("min_cost", min_cost);
f->open_array_section("high_queues");
for (typename SubQueues::const_iterator p = high_queue.begin();
p != high_queue.end();
++p) {
f->open_object_section("subqueue");
f->dump_int("priority", p->first);
p->second.dump(f);
f->close_section();
}
f->close_section();
f->open_array_section("queues");
for (typename SubQueues::const_iterator p = queue.begin();
p != queue.end();
++p) {
f->open_object_section("subqueue");
f->dump_int("priority", p->first);
p->second.dump(f);
f->close_section();
}
f->close_section();
}
void print(std::ostream &ostream) const final {
ostream << "PrioritizedQueue";
}
};
#endif
| 9,370 | 25.546742 | 78 | h |
null | ceph-main/src/common/PriorityCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_PRIORITY_CACHE_H
#define CEPH_PRIORITY_CACHE_H
#include <stdint.h>
#include <string>
#include <vector>
#include <memory>
#include <unordered_map>
#include "common/perf_counters.h"
#include "include/ceph_assert.h"
namespace PriorityCache {
// Reserve 16384 slots for PriorityCache perf counters
const int PERF_COUNTER_LOWER_BOUND = 1073741824;
const int PERF_COUNTER_MAX_BOUND = 1073758208;
enum MallocStats {
M_FIRST = PERF_COUNTER_LOWER_BOUND,
M_TARGET_BYTES,
M_MAPPED_BYTES,
M_UNMAPPED_BYTES,
M_HEAP_BYTES,
M_CACHE_BYTES,
M_LAST,
};
enum Priority {
PRI0,
PRI1,
PRI2,
PRI3,
PRI4,
PRI5,
PRI6,
PRI7,
PRI8,
PRI9,
PRI10,
PRI11,
LAST = PRI11,
};
enum Extra {
E_RESERVED = Priority::LAST+1,
E_COMMITTED,
E_LAST = E_COMMITTED,
};
int64_t get_chunk(uint64_t usage, uint64_t total_bytes);
struct PriCache {
virtual ~PriCache();
/* Ask the cache to request memory for the given priority. Note that the
* cache may ultimately be allocated less memory than it requests here.
*/
virtual int64_t request_cache_bytes(PriorityCache::Priority pri, uint64_t total_cache) const = 0;
// Get the number of bytes currently allocated to the given priority.
virtual int64_t get_cache_bytes(PriorityCache::Priority pri) const = 0;
// Get the number of bytes currently allocated to all priorities.
virtual int64_t get_cache_bytes() const = 0;
// Allocate bytes for a given priority.
virtual void set_cache_bytes(PriorityCache::Priority pri, int64_t bytes) = 0;
// Allocate additional bytes for a given priority.
virtual void add_cache_bytes(PriorityCache::Priority pri, int64_t bytes) = 0;
/* Commit the current number of bytes allocated to the cache. Space is
* allocated in chunks based on the allocation size and current total size
* of memory available for caches. */
virtual int64_t commit_cache_size(uint64_t total_cache) = 0;
/* Get the current number of bytes allocated to the cache. this may be
* larger than the value returned by get_cache_bytes as it includes extra
* space for future growth. */
virtual int64_t get_committed_size() const = 0;
// Get the ratio of available memory this cache should target.
virtual double get_cache_ratio() const = 0;
// Set the ratio of available memory this cache should target.
virtual void set_cache_ratio(double ratio) = 0;
// Get the name of this cache.
virtual std::string get_cache_name() const = 0;
// Rotate the bins
virtual void shift_bins() = 0;
// Import user bins (from PRI1 to LAST-1)
virtual void import_bins(const std::vector<uint64_t> &bins) = 0;
// Set bins (PRI0 and LAST should be ignored)
virtual void set_bins(PriorityCache::Priority pri, uint64_t end_bin) = 0;
// Get bins
virtual uint64_t get_bins(PriorityCache::Priority pri) const = 0;
};
class Manager {
CephContext* cct = nullptr;
PerfCounters* logger;
std::unordered_map<std::string, PerfCounters*> loggers;
std::unordered_map<std::string, std::vector<int>> indexes;
std::unordered_map<std::string, std::shared_ptr<PriCache>> caches;
// Start perf counter slots after the malloc stats.
int cur_index = MallocStats::M_LAST;
uint64_t min_mem = 0;
uint64_t max_mem = 0;
uint64_t target_mem = 0;
uint64_t tuned_mem = 0;
bool reserve_extra;
std::string name;
public:
Manager(CephContext *c, uint64_t min, uint64_t max, uint64_t target,
bool reserve_extra, const std::string& name = std::string());
~Manager();
void set_min_memory(uint64_t min) {
min_mem = min;
}
void set_max_memory(uint64_t max) {
max_mem = max;
}
void set_target_memory(uint64_t target) {
target_mem = target;
}
uint64_t get_tuned_mem() const {
return tuned_mem;
}
void insert(const std::string& name, const std::shared_ptr<PriCache> c,
bool enable_perf_counters);
void erase(const std::string& name);
void clear();
void tune_memory();
void balance();
void shift_bins();
private:
void balance_priority(int64_t *mem_avail, Priority pri);
};
}
#endif
| 4,692 | 27.969136 | 101 | h |
null | ceph-main/src/common/RWLock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_RWLock_Posix__H
#define CEPH_RWLock_Posix__H
#include <pthread.h>
#include <string>
#include "include/ceph_assert.h"
#include "acconfig.h"
#include "lockdep.h"
#include "common/valgrind.h"
#include <atomic>
class RWLock final
{
mutable pthread_rwlock_t L;
std::string name;
mutable int id;
mutable std::atomic<unsigned> nrlock = { 0 }, nwlock = { 0 };
bool track, lockdep;
std::string unique_name(const char* name) const;
public:
RWLock(const RWLock& other) = delete;
const RWLock& operator=(const RWLock& other) = delete;
RWLock(const std::string &n, bool track_lock=true, bool ld=true, bool prioritize_write=false)
: name(n), id(-1), track(track_lock),
lockdep(ld) {
#if defined(HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
if (prioritize_write) {
pthread_rwlockattr_t attr;
pthread_rwlockattr_init(&attr);
// PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
      // Setting the lock kind to this avoids writer starvation as long as
      // any read locking is not done in a recursive fashion.
pthread_rwlockattr_setkind_np(&attr,
PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
pthread_rwlock_init(&L, &attr);
pthread_rwlockattr_destroy(&attr);
} else
#endif
    // The next block is braced so it can pair with the if-branch above when that code is compiled in.
{
pthread_rwlock_init(&L, NULL);
}
ANNOTATE_BENIGN_RACE_SIZED(&id, sizeof(id), "RWLock lockdep id");
ANNOTATE_BENIGN_RACE_SIZED(&nrlock, sizeof(nrlock), "RWlock nrlock");
ANNOTATE_BENIGN_RACE_SIZED(&nwlock, sizeof(nwlock), "RWlock nwlock");
if (lockdep && g_lockdep) id = lockdep_register(name.c_str());
}
bool is_locked() const {
ceph_assert(track);
return (nrlock > 0) || (nwlock > 0);
}
bool is_wlocked() const {
ceph_assert(track);
return (nwlock > 0);
}
~RWLock() {
// The following check is racy but we are about to destroy
// the object and we assume that there are no other users.
if (track)
ceph_assert(!is_locked());
pthread_rwlock_destroy(&L);
if (lockdep && g_lockdep) {
lockdep_unregister(id);
}
}
void unlock(bool lockdep=true) const {
if (track) {
if (nwlock > 0) {
nwlock--;
} else {
ceph_assert(nrlock > 0);
nrlock--;
}
}
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_will_unlock(name.c_str(), id);
int r = pthread_rwlock_unlock(&L);
ceph_assert(r == 0);
}
// read
void get_read() const {
if (lockdep && g_lockdep) id = lockdep_will_lock(name.c_str(), id);
int r = pthread_rwlock_rdlock(&L);
ceph_assert(r == 0);
if (lockdep && g_lockdep) id = lockdep_locked(name.c_str(), id);
if (track)
nrlock++;
}
bool try_get_read() const {
if (pthread_rwlock_tryrdlock(&L) == 0) {
if (track)
nrlock++;
if (lockdep && g_lockdep) id = lockdep_locked(name.c_str(), id);
return true;
}
return false;
}
void put_read() const {
unlock();
}
void lock_shared() {
get_read();
}
void unlock_shared() {
put_read();
}
// write
void get_write(bool lockdep=true) {
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_will_lock(name.c_str(), id);
int r = pthread_rwlock_wrlock(&L);
ceph_assert(r == 0);
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_locked(name.c_str(), id);
if (track)
nwlock++;
}
bool try_get_write(bool lockdep=true) {
if (pthread_rwlock_trywrlock(&L) == 0) {
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_locked(name.c_str(), id);
if (track)
nwlock++;
return true;
}
return false;
}
void put_write() {
unlock();
}
void lock() {
get_write();
}
void get(bool for_write) {
if (for_write) {
get_write();
} else {
get_read();
}
}
public:
class RLocker {
const RWLock &m_lock;
bool locked;
public:
explicit RLocker(const RWLock& lock) : m_lock(lock) {
m_lock.get_read();
locked = true;
}
void unlock() {
ceph_assert(locked);
m_lock.unlock();
locked = false;
}
~RLocker() {
if (locked) {
m_lock.unlock();
}
}
};
class WLocker {
RWLock &m_lock;
bool locked;
public:
explicit WLocker(RWLock& lock) : m_lock(lock) {
m_lock.get_write();
locked = true;
}
void unlock() {
ceph_assert(locked);
m_lock.unlock();
locked = false;
}
~WLocker() {
if (locked) {
m_lock.unlock();
}
}
};
class Context {
RWLock& lock;
public:
enum LockState {
Untaken = 0,
TakenForRead = 1,
TakenForWrite = 2,
};
private:
LockState state;
public:
explicit Context(RWLock& l) : lock(l), state(Untaken) {}
Context(RWLock& l, LockState s) : lock(l), state(s) {}
void get_write() {
ceph_assert(state == Untaken);
lock.get_write();
state = TakenForWrite;
}
void get_read() {
ceph_assert(state == Untaken);
lock.get_read();
state = TakenForRead;
}
void unlock() {
ceph_assert(state != Untaken);
lock.unlock();
state = Untaken;
}
void promote() {
ceph_assert(state == TakenForRead);
unlock();
get_write();
}
LockState get_state() { return state; }
void set_state(LockState s) {
state = s;
}
bool is_locked() {
return (state != Untaken);
}
bool is_rlocked() {
return (state == TakenForRead);
}
bool is_wlocked() {
return (state == TakenForWrite);
}
};
};
#endif // !CEPH_RWLock_Posix__H
| 6,180 | 21.476364 | 95 | h |
null | ceph-main/src/common/Readahead.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_READAHEAD_H
#define CEPH_READAHEAD_H
#include <list>
#include <vector>
#include "include/Context.h"
#include "common/ceph_mutex.h"
/**
This class provides common state and logic for code that needs to perform readahead
on linear things such as RBD images or files.
Unless otherwise specified, all methods are thread-safe.
Minimum and maximum readahead sizes may be violated by up to 50\% if alignment is enabled.
Minimum readahead size may be violated if the end of the readahead target is reached.
*/
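/**
 Illustrative usage sketch. The sizes and the asynchronous read helper below
 are hypothetical and not part of this header.
   Readahead ra;
   ra.set_trigger_requests(10);
   ra.set_min_readahead_size(4096);
   ra.set_max_readahead_size(4 << 20);
   // for each application read of (offset, length) on an object of object_size bytes:
   Readahead::extent_t extent = ra.update(offset, length, object_size);
   if (extent.second > 0) {
     ra.inc_pending();
     issue_async_read(extent.first, extent.second);  // completion must call ra.dec_pending()
   }
*/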
class Readahead {
public:
typedef std::pair<uint64_t, uint64_t> extent_t;
// equal to UINT64_MAX
static const uint64_t NO_LIMIT = 18446744073709551615ULL;
Readahead();
~Readahead();
/**
Update state with new reads and return readahead to be performed.
If the length of the returned extent is 0, no readahead should be performed.
The readahead extent is guaranteed not to pass \c limit.
Note that passing in NO_LIMIT as the limit and truncating the returned extent
is not the same as passing in the correct limit, because the internal state
will differ in the two cases.
@param extents read operations since last call to update
@param limit size of the thing readahead is being applied to
*/
extent_t update(const std::vector<extent_t>& extents, uint64_t limit);
/**
Update state with a new read and return readahead to be performed.
If the length of the returned extent is 0, no readahead should be performed.
The readahead extent is guaranteed not to pass \c limit.
Note that passing in NO_LIMIT as the limit and truncating the returned extent
is not the same as passing in the correct limit, because the internal state
will differ in the two cases.
@param offset offset of the read operation
@param length length of the read operation
@param limit size of the thing readahead is being applied to
*/
extent_t update(uint64_t offset, uint64_t length, uint64_t limit);
/**
Increment the pending counter.
*/
void inc_pending(int count = 1);
/**
Decrement the pending counter.
The counter must not be decremented below 0.
*/
void dec_pending(int count = 1);
/**
Waits until the pending count reaches 0.
*/
void wait_for_pending();
void wait_for_pending(Context *ctx);
/**
Sets the number of sequential requests necessary to trigger readahead.
*/
void set_trigger_requests(int trigger_requests);
/**
Gets the minimum size of a readahead request, in bytes.
*/
uint64_t get_min_readahead_size(void);
/**
Gets the maximum size of a readahead request, in bytes.
*/
uint64_t get_max_readahead_size(void);
/**
Sets the minimum size of a readahead request, in bytes.
*/
void set_min_readahead_size(uint64_t min_readahead_size);
/**
Sets the maximum size of a readahead request, in bytes.
*/
void set_max_readahead_size(uint64_t max_readahead_size);
/**
Sets the alignment units.
If the end point of a readahead request can be aligned to an alignment unit
by increasing or decreasing the size of the request by 50\% or less, it will.
Alignments are tested in order, so larger numbers should almost always come first.
*/
void set_alignments(const std::vector<uint64_t> &alignments);
private:
/**
Records that a read request has been received.
m_lock must be held while calling.
*/
void _observe_read(uint64_t offset, uint64_t length);
/**
Computes the next readahead request.
m_lock must be held while calling.
*/
extent_t _compute_readahead(uint64_t limit);
/// Number of sequential requests necessary to trigger readahead
int m_trigger_requests;
/// Minimum size of a readahead request, in bytes
uint64_t m_readahead_min_bytes;
/// Maximum size of a readahead request, in bytes
uint64_t m_readahead_max_bytes;
/// Alignment units, in bytes
std::vector<uint64_t> m_alignments;
/// Held while reading/modifying any state except m_pending
ceph::mutex m_lock = ceph::make_mutex("Readahead::m_lock");
/// Number of consecutive read requests in the current sequential stream
int m_nr_consec_read;
  /// Number of bytes read in the current sequential stream
uint64_t m_consec_read_bytes;
/// Position of the read stream
uint64_t m_last_pos;
/// Position of the readahead stream
uint64_t m_readahead_pos;
/// When readahead is already triggered and the read stream crosses this point, readahead is continued
uint64_t m_readahead_trigger_pos;
/// Size of the next readahead request (barring changes due to alignment, etc.)
uint64_t m_readahead_size;
/// Number of pending readahead requests, as determined by inc_pending() and dec_pending()
int m_pending;
/// Lock for m_pending
ceph::mutex m_pending_lock = ceph::make_mutex("Readahead::m_pending_lock");
/// Waiters for pending readahead
std::list<Context *> m_pending_waiting;
};
#endif
| 5,113 | 29.440476 | 104 | h |
null | ceph-main/src/common/RefCountedObj.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_REFCOUNTEDOBJ_H
#define CEPH_REFCOUNTEDOBJ_H
#include "common/ceph_mutex.h"
#include "common/ref.h"
#include "include/common_fwd.h"
#include <atomic>
/* This class provides mechanisms to make a sub-class work with
* boost::intrusive_ptr (aka ceph::ref_t).
*
* Generally, you'll want to inherit from RefCountedObjectSafe and not from
* RefCountedObject directly. This is because the ::get and ::put methods are
* public and can be used to create/delete references outside of the
* ceph::ref_t pointers with the potential to leak memory.
*
* It is also suggested that you make constructors and destructors private in
* your final class. This prevents instantiation of the object with assignment
* to a raw pointer. Consequently, you'll want to use ceph::make_ref<> to
* create a ceph::ref_t<> holding your object:
*
* auto ptr = ceph::make_ref<Foo>(...);
*
* Use FRIEND_MAKE_REF(ClassName) to allow ceph::make_ref to call the private
* constructors.
*
*/
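/* Illustrative sketch of the suggested pattern ("Foo" is a hypothetical type,
 * not part of this header):
 *
 *   class Foo : public RefCountedObjectSafe {
 *   public:
 *     int value() const { return v; }
 *   private:
 *     FRIEND_MAKE_REF(Foo);
 *     explicit Foo(int v) : v(v) {}
 *     ~Foo() override = default;
 *     int v;
 *   };
 *
 *   auto foo = ceph::make_ref<Foo>(42);  // ceph::ref_t<Foo>; freed when the last ref drops
 */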
namespace TOPNSPC::common {
class RefCountedObject {
public:
void set_cct(CephContext *c) {
cct = c;
}
uint64_t get_nref() const {
return nref;
}
const RefCountedObject *get() const {
_get();
return this;
}
RefCountedObject *get() {
_get();
return this;
}
void put() const;
protected:
RefCountedObject() = default;
RefCountedObject(const RefCountedObject& o) : cct(o.cct) {}
RefCountedObject& operator=(const RefCountedObject& o) = delete;
RefCountedObject(RefCountedObject&&) = delete;
RefCountedObject& operator=(RefCountedObject&&) = delete;
RefCountedObject(CephContext* c) : cct(c) {}
virtual ~RefCountedObject();
private:
void _get() const;
mutable std::atomic<uint64_t> nref{1};
CephContext *cct{nullptr};
};
class RefCountedObjectSafe : public RefCountedObject {
public:
RefCountedObject *get() = delete;
const RefCountedObject *get() const = delete;
void put() const = delete;
protected:
template<typename... Args>
RefCountedObjectSafe(Args&&... args) : RefCountedObject(std::forward<Args>(args)...) {}
virtual ~RefCountedObjectSafe() override {}
};
#if !defined(WITH_SEASTAR)|| defined(WITH_ALIEN)
/**
* RefCountedCond
*
* a refcounted condition, will be removed when all references are dropped
*/
struct RefCountedCond : public RefCountedObject {
RefCountedCond() = default;
~RefCountedCond() = default;
int wait() {
std::unique_lock l(lock);
while (!complete) {
cond.wait(l);
}
return rval;
}
void done(int r) {
std::lock_guard l(lock);
rval = r;
complete = true;
cond.notify_all();
}
void done() {
done(0);
}
private:
bool complete = false;
ceph::mutex lock = ceph::make_mutex("RefCountedCond::lock");
ceph::condition_variable cond;
int rval = 0;
};
/**
* RefCountedWaitObject
*
* refcounted object that allows waiting for the object's last reference.
* Any referrer can either put or put_wait(). A simple put() will return
* immediately, a put_wait() will return only when the object is destroyed.
* e.g., useful when we want to wait for a specific event completion. We
* use RefCountedCond, as the condition can be referenced after the object
* destruction.
*
*/
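/*
 * Illustrative sketch (the async worker helper below is hypothetical):
 *
 *   struct InFlightOp : public RefCountedWaitObject {
 *     // op state ...
 *   };
 *
 *   InFlightOp *op = new InFlightOp;
 *   submit_to_worker(op->get());  // worker calls op->put() when the op completes
 *   op->put_wait();               // returns only after the worker's put() drops the last ref
 */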
struct RefCountedWaitObject {
std::atomic<uint64_t> nref = { 1 };
RefCountedCond *c;
RefCountedWaitObject() {
c = new RefCountedCond;
}
virtual ~RefCountedWaitObject() {
c->put();
}
RefCountedWaitObject *get() {
nref++;
return this;
}
bool put() {
bool ret = false;
RefCountedCond *cond = c;
cond->get();
if (--nref == 0) {
cond->done();
delete this;
ret = true;
}
cond->put();
return ret;
}
void put_wait() {
RefCountedCond *cond = c;
cond->get();
if (--nref == 0) {
cond->done();
delete this;
} else {
cond->wait();
}
cond->put();
}
};
#endif // !defined(WITH_SEASTAR)|| defined(WITH_ALIEN)
static inline void intrusive_ptr_add_ref(const RefCountedObject *p) {
p->get();
}
static inline void intrusive_ptr_release(const RefCountedObject *p) {
p->put();
}
struct UniquePtrDeleter
{
void operator()(RefCountedObject *p) const
{
// Don't expect a call to `get()` in the ctor as we manually set nref to 1
p->put();
}
};
}
using RefCountedPtr = ceph::ref_t<TOPNSPC::common::RefCountedObject>;
#endif
| 4,821 | 22.753695 | 89 | h |
null | ceph-main/src/common/SloppyCRCMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_COMMON_SLOPPYCRCMAP_H
#define CEPH_COMMON_SLOPPYCRCMAP_H
#include "include/encoding.h"
namespace ceph {
class Formatter;
}
/**
* SloppyCRCMap
*
* Opportunistically track CRCs on any reads or writes that cover full
* blocks. Verify read results when we have CRC data available for
* the given extent.
*/
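/*
 * Illustrative usage sketch (the block size, offsets and read_back() helper
 * are hypothetical):
 *
 *   SloppyCRCMap crc_map(4096);            // track CRCs at 4 KiB granularity
 *
 *   ceph::buffer::list wbl;
 *   wbl.append(std::string(8192, 'a'));
 *   crc_map.write(0, wbl.length(), wbl);   // records CRCs for two full blocks
 *
 *   ceph::buffer::list rbl = read_back(0, 8192);
 *   std::ostringstream err;
 *   if (crc_map.read(0, rbl.length(), rbl, &err) > 0)
 *     std::cerr << err.str() << std::endl;
 */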
class SloppyCRCMap {
static const int crc_iv = 0xffffffff;
std::map<uint64_t, uint32_t> crc_map; // offset -> crc(-1)
uint32_t block_size;
uint32_t zero_crc;
public:
SloppyCRCMap(uint32_t b=0) {
set_block_size(b);
}
void set_block_size(uint32_t b) {
block_size = b;
//zero_crc = ceph_crc32c(0xffffffff, NULL, block_size);
if (b) {
ceph::buffer::list bl;
bl.append_zero(block_size);
zero_crc = bl.crc32c(crc_iv);
} else {
zero_crc = crc_iv;
}
}
/// update based on a write
void write(uint64_t offset, uint64_t len, const ceph::buffer::list& bl,
std::ostream *out = NULL);
/// update based on a truncate
void truncate(uint64_t offset);
/// update based on a zero/punch_hole
void zero(uint64_t offset, uint64_t len);
/// update based on a zero/punch_hole
void clone_range(uint64_t offset, uint64_t len, uint64_t srcoff, const SloppyCRCMap& src,
std::ostream *out = NULL);
/**
* validate a read result
*
* @param offset offset
   * @param len length
   * @param bl data read
   * @param err optional ostream to describe errors in detail
* @returns error count, 0 for success
*/
int read(uint64_t offset, uint64_t len, const ceph::buffer::list& bl, std::ostream *err);
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<SloppyCRCMap*>& ls);
};
WRITE_CLASS_ENCODER(SloppyCRCMap)
#endif
| 1,964 | 24.519481 | 91 | h |
null | ceph-main/src/common/StackStringStream.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef COMMON_STACKSTRINGSTREAM_H
#define COMMON_STACKSTRINGSTREAM_H
#include <boost/container/small_vector.hpp>
#include <algorithm>
#include <iostream>
#include <memory>
#include <ostream>
#include <sstream>
#include <string_view>
#include <vector>
#include "include/inline_memory.h"
template<std::size_t SIZE>
class StackStringBuf : public std::basic_streambuf<char>
{
public:
StackStringBuf()
: vec{SIZE, boost::container::default_init_t{}}
{
setp(vec.data(), vec.data() + vec.size());
}
StackStringBuf(const StackStringBuf&) = delete;
StackStringBuf& operator=(const StackStringBuf&) = delete;
StackStringBuf(StackStringBuf&& o) = delete;
StackStringBuf& operator=(StackStringBuf&& o) = delete;
~StackStringBuf() override = default;
void clear()
{
vec.resize(SIZE);
setp(vec.data(), vec.data() + SIZE);
}
std::string_view strv() const
{
return std::string_view(pbase(), pptr() - pbase());
}
protected:
std::streamsize xsputn(const char *s, std::streamsize n) final
{
std::streamsize capacity = epptr() - pptr();
std::streamsize left = n;
if (capacity >= left) {
maybe_inline_memcpy(pptr(), s, left, 32);
pbump(left);
} else {
maybe_inline_memcpy(pptr(), s, capacity, 64);
s += capacity;
left -= capacity;
vec.insert(vec.end(), s, s + left);
setp(vec.data(), vec.data() + vec.size());
pbump(vec.size());
}
return n;
}
int overflow(int c) final
{
if (traits_type::not_eof(c)) {
char str = traits_type::to_char_type(c);
vec.push_back(str);
return c;
} else {
return traits_type::eof();
}
}
private:
boost::container::small_vector<char, SIZE> vec;
};
template<std::size_t SIZE>
class StackStringStream : public std::basic_ostream<char>
{
public:
StackStringStream() : basic_ostream<char>(&ssb), default_fmtflags(flags()) {}
StackStringStream(const StackStringStream& o) = delete;
StackStringStream& operator=(const StackStringStream& o) = delete;
StackStringStream(StackStringStream&& o) = delete;
StackStringStream& operator=(StackStringStream&& o) = delete;
~StackStringStream() override = default;
void reset() {
clear(); /* reset state flags */
flags(default_fmtflags); /* reset fmtflags to constructor defaults */
ssb.clear();
}
std::string_view strv() const {
return ssb.strv();
}
std::string str() const {
return std::string(ssb.strv());
}
private:
StackStringBuf<SIZE> ssb;
fmtflags const default_fmtflags;
};
/* In an ideal world, we could use StackStringStream indiscriminately, but alas
* it's very expensive to construct/destruct. So, we cache them in a
* thread_local vector. DO NOT share these with other threads. The copy/move
* constructors are deliberately restrictive to make this more difficult to
* accidentally do.
*/
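/* Illustrative usage sketch (the values streamed below are hypothetical):
 *
 *   CachedStackStringStream css;
 *   *css << "osd." << osd_id << " state=" << state_name;
 *   std::string_view msg = css->strv();  // valid while css is alive
 *
 * The underlying stream is returned to the thread-local cache when css goes
 * out of scope, avoiding a stringstream construction per message.
 */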
class CachedStackStringStream {
public:
using sss = StackStringStream<4096>;
using osptr = std::unique_ptr<sss>;
CachedStackStringStream() {
if (cache.destructed || cache.c.empty()) {
osp = std::make_unique<sss>();
} else {
osp = std::move(cache.c.back());
cache.c.pop_back();
osp->reset();
}
}
CachedStackStringStream(const CachedStackStringStream&) = delete;
CachedStackStringStream& operator=(const CachedStackStringStream&) = delete;
CachedStackStringStream(CachedStackStringStream&&) = delete;
CachedStackStringStream& operator=(CachedStackStringStream&&) = delete;
~CachedStackStringStream() {
if (!cache.destructed && cache.c.size() < max_elems) {
cache.c.emplace_back(std::move(osp));
}
}
sss& operator*() {
return *osp;
}
sss const& operator*() const {
return *osp;
}
sss* operator->() {
return osp.get();
}
sss const* operator->() const {
return osp.get();
}
sss const* get() const {
return osp.get();
}
sss* get() {
return osp.get();
}
private:
static constexpr std::size_t max_elems = 8;
/* The thread_local cache may be destructed before other static structures.
* If those destructors try to create a CachedStackStringStream (e.g. for
* logging) and access this cache, that access will be undefined. So note if
* the cache has been destructed and check before use.
*/
struct Cache {
using container = std::vector<osptr>;
Cache() {}
~Cache() { destructed = true; }
container c;
bool destructed = false;
};
inline static thread_local Cache cache;
osptr osp;
};
#endif
| 4,934 | 24.569948 | 79 | h |
null | ceph-main/src/common/TextTable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEXT_TABLE_H_
#define TEXT_TABLE_H_
#include <vector>
#include <sstream>
#include "include/ceph_assert.h"
/**
* TextTable:
* Manage tabular output of data. Caller defines heading of each column
* and alignment of heading and column data,
* then inserts rows of data including tuples of
* length (ncolumns) terminated by TextTable::endrow. When all rows
* are inserted, caller asks for output with ostream <<
* which sizes/pads/dumps the table to ostream.
*
* Columns autosize to largest heading or datum. One space is printed
* between columns.
*/
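/*
 * Illustrative usage sketch:
 *
 *   TextTable t;
 *   t.define_column("NAME", TextTable::LEFT, TextTable::LEFT);
 *   t.define_column("SIZE", TextTable::RIGHT, TextTable::RIGHT);
 *   t << "foo" << 1024 << TextTable::endrow;
 *   t << "bar" << 20 << TextTable::endrow;
 *   std::cout << t;   // columns are sized and padded on output
 */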
class TextTable {
public:
enum Align {LEFT = 1, CENTER, RIGHT};
private:
struct TextTableColumn {
std::string heading;
int width;
Align hd_align;
Align col_align;
TextTableColumn() {}
TextTableColumn(const std::string &h, int w, Align ha, Align ca) :
heading(h), width(w), hd_align(ha), col_align(ca) { }
~TextTableColumn() {}
};
std::vector<TextTableColumn> col; // column definitions
unsigned int curcol, currow; // col, row being inserted into
unsigned int indent; // indent width when rendering
std::string column_separation = {" "};
protected:
std::vector<std::vector<std::string> > row; // row data array
public:
TextTable(): curcol(0), currow(0), indent(0) {}
~TextTable() {}
/**
* Define a column in the table.
*
* @param heading Column heading string (or "")
* @param hd_align Alignment for heading in column
* @param col_align Data alignment
*
* @note alignment is of type TextTable::Align; values are
* TextTable::LEFT, TextTable::CENTER, or TextTable::RIGHT
*
*/
void define_column(const std::string& heading, Align hd_align,
Align col_align);
/**
* Set indent for table. Only affects table output.
*
* @param i Number of spaces to indent
*/
void set_indent(int i) { indent = i; }
/**
* Set column separation
*
* @param s String to separate columns
*/
void set_column_separation(const std::string& s) {
column_separation = s;
}
/**
* Add item to table, perhaps on new row.
* table << val1 << val2 << TextTable::endrow;
*
   * @param item value to output.
*
* @note: Numerics are output in decimal; strings are not truncated.
* Output formatting choice is limited to alignment in define_column().
*
* @return TextTable& for chaining.
*/
template<typename T> TextTable& operator<<(const T& item)
{
if (row.size() < currow + 1)
row.resize(currow + 1);
/**
* col.size() is a good guess for how big row[currow] needs to be,
* so just expand it out now
*/
if (row[currow].size() < col.size()) {
row[currow].resize(col.size());
}
// inserting more items than defined columns is a coding error
ceph_assert(curcol + 1 <= col.size());
// get rendered width of item alone
std::ostringstream oss;
oss << item;
int width = oss.str().length();
oss.seekp(0);
// expand column width if necessary
if (width > col[curcol].width) {
col[curcol].width = width;
}
// now store the rendered item with its proper width
row[currow][curcol] = oss.str();
curcol++;
return *this;
}
/**
* Degenerate type/variable here is just to allow selection of the
* following operator<< for "<< TextTable::endrow"
*/
struct endrow_t {};
static constexpr endrow_t endrow{};
/**
* Implements TextTable::endrow
*/
TextTable &operator<<(endrow_t)
{
curcol = 0;
currow++;
return *this;
}
/**
* Render table to ostream (i.e. cout << table)
*/
friend std::ostream &operator<<(std::ostream &out, const TextTable &t);
/**
* clear: Reset everything in a TextTable except column defs
* resize cols to heading widths, clear indent
*/
void clear();
};
#endif
| 4,269 | 23.261364 | 73 | h |
null | ceph-main/src/common/Thread.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_THREAD_H
#define CEPH_THREAD_H
#include <functional>
#include <string_view>
#include <system_error>
#include <thread>
#include <pthread.h>
#include <sys/types.h>
#include "include/compat.h"
extern pid_t ceph_gettid();
class Thread {
private:
pthread_t thread_id;
pid_t pid;
int cpuid;
std::string thread_name;
void *entry_wrapper();
public:
Thread(const Thread&) = delete;
Thread& operator=(const Thread&) = delete;
Thread();
virtual ~Thread();
protected:
virtual void *entry() = 0;
private:
static void *_entry_func(void *arg);
public:
const pthread_t &get_thread_id() const;
pid_t get_pid() const { return pid; }
bool is_started() const;
bool am_self() const;
int kill(int signal);
int try_create(size_t stacksize);
void create(const char *name, size_t stacksize = 0);
int join(void **prval = 0);
int detach();
int set_affinity(int cpuid);
};
// Functions for use with std::thread
void set_thread_name(std::thread& t, const std::string& s);
std::string get_thread_name(const std::thread& t);
void kill(std::thread& t, int signal);
template<typename Fun, typename... Args>
std::thread make_named_thread(std::string_view n,
Fun&& fun,
Args&& ...args) {
return std::thread([n = std::string(n)](auto&& fun, auto&& ...args) {
ceph_pthread_setname(pthread_self(), n.data());
std::invoke(std::forward<Fun>(fun),
std::forward<Args>(args)...);
}, std::forward<Fun>(fun), std::forward<Args>(args)...);
}
#endif
| 1,941 | 22.119048 | 71 | h |
null | ceph-main/src/common/Throttle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_THROTTLE_H
#define CEPH_THROTTLE_H
#include <atomic>
#include <chrono>
#include <iostream>
#include <list>
#include <map>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "common/ThrottleInterface.h"
#include "common/Timer.h"
#include "common/convenience.h"
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#include "crimson/common/perf_counters_collection.h"
#else
#include "common/perf_counters_collection.h"
#endif
/**
* @class Throttle
* Throttles the maximum number of active requests.
*
 * This class limits the number of slots that can be taken at any one time.
 * Requests that would exceed the limit are delayed until enough slots are put
 * back and @p get_current() drops below the limit, at which point they are fulfilled.
*/
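/*
 * Illustrative usage sketch (the limit of 100 slots and do_work() are hypothetical):
 *
 *   Throttle throttle(cct, "client_ops", 100);
 *
 *   throttle.get(1);                 // blocks while all 100 slots are taken
 *   do_work();
 *   throttle.put(1);                 // frees a slot and wakes a waiter
 *
 *   if (throttle.get_or_fail(1)) {   // non-blocking variant
 *     do_work();
 *     throttle.put(1);
 *   }
 */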
class Throttle final : public ThrottleInterface {
CephContext *cct;
const std::string name;
PerfCountersRef logger;
std::atomic<int64_t> count = { 0 }, max = { 0 };
std::mutex lock;
std::list<std::condition_variable> conds;
const bool use_perf;
public:
Throttle(CephContext *cct, const std::string& n, int64_t m = 0, bool _use_perf = true);
~Throttle() override;
private:
void _reset_max(int64_t m);
bool _should_wait(int64_t c) const {
int64_t m = max;
int64_t cur = count;
return
m &&
((c <= m && cur + c > m) || // normally stay under max
(c >= m && cur > m)); // except for large c
}
bool _wait(int64_t c, std::unique_lock<std::mutex>& l);
public:
/**
* gets the number of currently taken slots
* @returns the number of taken slots
*/
int64_t get_current() const {
return count;
}
/**
* get the max number of slots
* @returns the max number of slots
*/
int64_t get_max() const { return max; }
/**
* return true if past midpoint
*/
bool past_midpoint() const {
return count >= max / 2;
}
/**
* set the new max number, and wait until the number of taken slots drains
* and drops below this limit.
*
* @param m the new max number
   * @returns true if this method is blocked, false if it returns immediately
*/
bool wait(int64_t m = 0);
/**
   * take the specified number of slots from the stock regardless of the throttling
* @param c number of slots to take
* @returns the total number of taken slots
*/
int64_t take(int64_t c = 1) override;
/**
* get the specified amount of slots from the stock, but will wait if the
* total number taken by consumer would exceed the maximum number.
* @param c number of slots to get
* @param m new maximum number to set, ignored if it is 0
* @returns true if this request is blocked due to the throttling, false
* otherwise
*/
bool get(int64_t c = 1, int64_t m = 0);
/**
   * the non-blocking version of @p get()
* @returns true if it successfully got the requested amount,
* or false if it would block.
*/
bool get_or_fail(int64_t c = 1);
/**
* put slots back to the stock
* @param c number of slots to return
   * @returns the number of slots still taken after this call
*/
int64_t put(int64_t c = 1) override;
/**
   * reset the count of taken slots to zero
*/
void reset();
void reset_max(int64_t m) {
std::lock_guard l(lock);
_reset_max(m);
}
};
/**
* BackoffThrottle
*
* Creates a throttle which gradually induces delays when get() is called
* based on params low_threshold, high_threshold, expected_throughput,
* high_multiple, and max_multiple.
*
* In [0, low_threshold), we want no delay.
*
* In [low_threshold, high_threshold), delays should be injected based
* on a line from 0 at low_threshold to
* high_multiple * (1/expected_throughput) at high_threshold.
*
* In [high_threshold, 1), we want delays injected based on a line from
* (high_multiple * (1/expected_throughput)) at high_threshold to
* (high_multiple * (1/expected_throughput)) +
* (max_multiple * (1/expected_throughput)) at 1.
*
* Let the current throttle ratio (current/max) be r, low_threshold be l,
* high_threshold be h, high_delay (high_multiple / expected_throughput) be e,
* and max_delay (max_multiple / expected_throughput) be m.
*
* delay = 0, r \in [0, l)
* delay = (r - l) * (e / (h - l)), r \in [l, h)
* delay = e + (r - h)((m - e)/(1 - h))
*/
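/*
 * Worked example (numbers are hypothetical): with l = 0.5, h = 0.9,
 * expected_throughput = 1000 ops/s, high_multiple = 2 and max_multiple = 10,
 * e = 2/1000 = 2ms and m = 10/1000 = 10ms, so:
 *
 *   r = 0.40 -> delay = 0
 *   r = 0.70 -> delay = (0.70 - 0.5) * (0.002 / 0.4) = 1ms
 *   r = 0.95 -> delay = 0.002 + (0.95 - 0.9) * ((0.010 - 0.002) / 0.1) = 6ms
 */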
class BackoffThrottle {
const std::string name;
PerfCountersRef logger;
std::mutex lock;
using locker = std::unique_lock<std::mutex>;
unsigned next_cond = 0;
/// allocated once to avoid constantly allocating new ones
std::vector<std::condition_variable> conds;
const bool use_perf;
/// pointers into conds
std::list<std::condition_variable*> waiters;
std::list<std::condition_variable*>::iterator _push_waiter() {
unsigned next = next_cond++;
if (next_cond == conds.size())
next_cond = 0;
return waiters.insert(waiters.end(), &(conds[next]));
}
void _kick_waiters() {
if (!waiters.empty())
waiters.front()->notify_all();
}
/// see above, values are in [0, 1].
double low_threshold = 0;
double high_threshold = 1;
/// see above, values are in seconds
double high_delay_per_count = 0;
double max_delay_per_count = 0;
  /// Filled in by set_params
double s0 = 0; ///< e / (h - l), l != h, 0 otherwise
double s1 = 0; ///< (m - e)/(1 - h), 1 != h, 0 otherwise
/// max
uint64_t max = 0;
uint64_t current = 0;
ceph::timespan _get_delay(uint64_t c) const;
public:
/**
* set_params
*
* Sets params. If the params are invalid, returns false
* and populates errstream (if non-null) with a user comprehensible
* explanation.
*/
bool set_params(
double _low_threshold,
double _high_threshold,
double expected_throughput,
double high_multiple,
double max_multiple,
uint64_t throttle_max,
std::ostream *errstream);
ceph::timespan get(uint64_t c = 1);
ceph::timespan wait() {
return get(0);
}
uint64_t put(uint64_t c = 1);
uint64_t take(uint64_t c = 1);
uint64_t get_current();
uint64_t get_max();
BackoffThrottle(CephContext *cct, const std::string& n,
unsigned expected_concurrency, ///< [in] determines size of conds
bool _use_perf = true);
~BackoffThrottle();
};
/**
* @class SimpleThrottle
* This is a simple way to bound the number of concurrent operations.
*
* It tracks the first error encountered, and makes it available
* when all requests are complete. wait_for_ret() should be called
* before the instance is destroyed.
*
* Re-using the same instance isn't safe if you want to check each set
* of operations for errors, since the return value is not reset.
*/
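/*
 * Illustrative usage sketch (the request list and submit_async() helper are hypothetical):
 *
 *   SimpleThrottle throttle(8, false);       // at most 8 ops in flight
 *   for (auto& req : requests) {
 *     throttle.start_op();                   // blocks once 8 ops are pending
 *     submit_async(req, [&throttle](int r) { throttle.end_op(r); });
 *   }
 *   int r = throttle.wait_for_ret();         // 0, or the first error seen
 */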
class SimpleThrottle {
public:
SimpleThrottle(uint64_t max, bool ignore_enoent);
~SimpleThrottle();
void start_op();
void end_op(int r);
bool pending_error() const;
int wait_for_ret();
private:
mutable std::mutex m_lock;
std::condition_variable m_cond;
uint64_t m_max;
uint64_t m_current = 0;
int m_ret = 0;
bool m_ignore_enoent;
uint32_t waiters = 0;
};
class OrderedThrottle;
class C_OrderedThrottle : public Context {
public:
C_OrderedThrottle(OrderedThrottle *ordered_throttle, uint64_t tid)
: m_ordered_throttle(ordered_throttle), m_tid(tid) {
}
protected:
void finish(int r) override;
private:
OrderedThrottle *m_ordered_throttle;
uint64_t m_tid;
};
/**
* @class OrderedThrottle
* Throttles the maximum number of active requests and completes them in order
*
* Operations can complete out-of-order but their associated Context callback
 * will be completed in-order during invocation of start_op() and wait_for_ret()
*/
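/*
 * Illustrative usage sketch (the submit_async() helper is hypothetical):
 *
 *   OrderedThrottle throttle(8, false);
 *   C_OrderedThrottle *ctx = throttle.start_op(on_finish);  // on_finish callbacks run in submission order
 *   submit_async(request, ctx);          // completion calls ctx->complete(r)
 *   ...
 *   int r = throttle.wait_for_ret();
 */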
class OrderedThrottle {
public:
OrderedThrottle(uint64_t max, bool ignore_enoent);
~OrderedThrottle();
C_OrderedThrottle *start_op(Context *on_finish);
void end_op(int r);
bool pending_error() const;
int wait_for_ret();
protected:
friend class C_OrderedThrottle;
void finish_op(uint64_t tid, int r);
private:
struct Result {
bool finished;
int ret_val;
Context *on_finish;
Result(Context *_on_finish = NULL)
: finished(false), ret_val(0), on_finish(_on_finish) {
}
};
typedef std::map<uint64_t, Result> TidResult;
mutable std::mutex m_lock;
std::condition_variable m_cond;
uint64_t m_max;
uint64_t m_current = 0;
int m_ret_val = 0;
bool m_ignore_enoent;
uint64_t m_next_tid = 0;
uint64_t m_complete_tid = 0;
TidResult m_tid_result;
void complete_pending_ops(std::unique_lock<std::mutex>& l);
uint32_t waiters = 0;
};
class TokenBucketThrottle {
struct Bucket {
CephContext *cct;
const std::string name;
uint64_t remain;
uint64_t max;
uint64_t capacity;
uint64_t available;
Bucket(CephContext *cct, const std::string &name, uint64_t m)
: cct(cct), name(name), remain(m), max(m), capacity(m), available(m) {}
uint64_t get(uint64_t c);
uint64_t put(uint64_t tokens, double burst_ratio);
void set_max(uint64_t max, uint64_t burst_seconds);
};
struct Blocker {
uint64_t tokens_requested;
Context *ctx;
Blocker(uint64_t _tokens_requested, Context* _ctx)
: tokens_requested(_tokens_requested), ctx(_ctx) {}
};
CephContext *m_cct;
const std::string m_name;
Bucket m_throttle;
uint64_t m_burst = 0;
uint64_t m_avg = 0;
SafeTimer *m_timer;
ceph::mutex *m_timer_lock;
Context *m_token_ctx = nullptr;
std::list<Blocker> m_blockers;
ceph::mutex m_lock;
// minimum of the filling period.
uint64_t m_tick_min = 50;
// tokens filling period, its unit is millisecond.
uint64_t m_tick = 0;
/**
* These variables are used to calculate how many tokens need to be put into
* the bucket within each tick.
*
* In actual use, the tokens to be put per tick(m_avg / m_ticks_per_second)
* may be a floating point number, but we need an 'uint64_t' to put into the
* bucket.
*
* For example, we set the value of rate to be 950, means 950 iops(or bps).
*
* In this case, the filling period(m_tick) should be 1000 / 950 = 1.052,
* which is too small for the SafeTimer. So we should set the period(m_tick)
* to be 50(m_tick_min), and 20 ticks in one second(m_ticks_per_second).
* The tokens filled in bucket per tick is 950 / 20 = 47.5, not an integer.
*
* To resolve this, we use a method called tokens_filled(m_current_tick) to
* calculate how many tokens will be put so far(until m_current_tick):
*
* tokens_filled = m_current_tick / m_ticks_per_second * m_avg
*
* And the difference between two ticks will be the result we expect.
* tokens in tick 0: (1 / 20 * 950) - (0 / 20 * 950) = 47 - 0 = 47
* tokens in tick 1: (2 / 20 * 950) - (1 / 20 * 950) = 95 - 47 = 48
* tokens in tick 2: (3 / 20 * 950) - (2 / 20 * 950) = 142 - 95 = 47
*
* As a result, the tokens filled in one second will shown as this:
* tick | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16|17|18|19|20|
* tokens |47|48|47|48|47|48|47|48|47|48|47|48|47|48|47|48|47|48|47|48|
*/
uint64_t m_ticks_per_second = 0;
uint64_t m_current_tick = 0;
// period for the bucket filling tokens, its unit is seconds.
double m_schedule_tick = 1.0;
public:
TokenBucketThrottle(CephContext *cct, const std::string &name,
uint64_t burst, uint64_t avg,
SafeTimer *timer, ceph::mutex *timer_lock);
~TokenBucketThrottle();
const std::string &get_name() {
return m_name;
}
template <typename T, typename MF, typename I>
void add_blocker(uint64_t c, T&& t, MF&& mf, I&& item, uint64_t flag) {
auto ctx = new LambdaContext(
[t, mf, item=std::forward<I>(item), flag](int) mutable {
(t->*mf)(std::forward<I>(item), flag);
});
m_blockers.emplace_back(c, ctx);
}
template <typename T, typename MF, typename I>
bool get(uint64_t c, T&& t, MF&& mf, I&& item, uint64_t flag) {
bool wait = false;
uint64_t got = 0;
std::lock_guard lock(m_lock);
if (!m_blockers.empty()) {
// Keep the order of requests, add item after previous blocked requests.
wait = true;
} else {
if (0 == m_throttle.max || 0 == m_avg)
return false;
got = m_throttle.get(c);
if (got < c) {
// Not enough tokens, add a blocker for it.
wait = true;
}
}
if (wait) {
add_blocker(c - got, std::forward<T>(t), std::forward<MF>(mf),
std::forward<I>(item), flag);
}
return wait;
}
int set_limit(uint64_t average, uint64_t burst, uint64_t burst_seconds);
void set_schedule_tick_min(uint64_t tick);
private:
uint64_t tokens_filled(double tick);
uint64_t tokens_this_tick();
void add_tokens();
void schedule_timer();
void cancel_timer();
};
#endif
| 12,846 | 26.334043 | 89 | h |
null | ceph-main/src/common/ThrottleInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
class ThrottleInterface {
public:
virtual ~ThrottleInterface() {}
/**
   * take the specified number of slots from the stock regardless of the throttling
* @param c number of slots to take
* @returns the total number of taken slots
*/
virtual int64_t take(int64_t c = 1) = 0;
/**
* put slots back to the stock
* @param c number of slots to return
   * @returns the number of slots still taken after this call
*/
virtual int64_t put(int64_t c = 1) = 0;
};
| 606 | 24.291667 | 80 | h |
null | ceph-main/src/common/Timer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_TIMER_H
#define CEPH_TIMER_H
#include <map>
#include "include/common_fwd.h"
#include "ceph_time.h"
#include "ceph_mutex.h"
#include "fair_mutex.h"
#include <condition_variable>
class Context;
template <class Mutex> class CommonSafeTimerThread;
template <class Mutex>
class CommonSafeTimer
{
CephContext *cct;
Mutex& lock;
std::condition_variable_any cond;
bool safe_callbacks;
friend class CommonSafeTimerThread<Mutex>;
class CommonSafeTimerThread<Mutex> *thread;
void timer_thread();
void _shutdown();
using clock_t = ceph::mono_clock;
using scheduled_map_t = std::multimap<clock_t::time_point, Context*>;
scheduled_map_t schedule;
using event_lookup_map_t = std::map<Context*, scheduled_map_t::iterator>;
event_lookup_map_t events;
bool stopping;
void dump(const char *caller = 0) const;
public:
// This class isn't supposed to be copied
CommonSafeTimer(const CommonSafeTimer&) = delete;
CommonSafeTimer& operator=(const CommonSafeTimer&) = delete;
/* Safe callbacks determines whether callbacks are called with the lock
* held.
*
* safe_callbacks = true (default option) guarantees that a cancelled
* event's callback will never be called.
*
* Under some circumstances, holding the lock can cause lock cycles.
* If you are able to relax requirements on cancelled callbacks, then
* setting safe_callbacks = false eliminates the lock cycle issue.
* */
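  /* Illustrative usage sketch (the timeout and callback below are hypothetical):
   *
   *   ceph::mutex timer_lock = ceph::make_mutex("timer_lock");
   *   SafeTimer timer(cct, timer_lock);
   *   timer.init();
   *   {
   *     std::lock_guard l(timer_lock);
   *     timer.add_event_after(30.0, new LambdaContext([](int) {
   *       // runs ~30 seconds later; with safe_callbacks=true the lock is held here
   *     }));
   *   }
   *   timer.shutdown();   // call without timer_lock held
   */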
CommonSafeTimer(CephContext *cct, Mutex &l, bool safe_callbacks=true);
virtual ~CommonSafeTimer();
/* Call with the event_lock UNLOCKED.
*
* Cancel all events and stop the timer thread.
*
* If there are any events that still have to run, they will need to take
* the event_lock first. */
void init();
void shutdown();
/* Schedule an event in the future
* Call with the event_lock LOCKED */
Context* add_event_after(ceph::timespan duration, Context *callback);
Context* add_event_after(double seconds, Context *callback);
Context* add_event_at(clock_t::time_point when, Context *callback);
Context* add_event_at(ceph::real_clock::time_point when, Context *callback);
/* Cancel an event.
* Call with the event_lock LOCKED
*
* Returns true if the callback was cancelled.
* Returns false if you never added the callback in the first place.
*/
bool cancel_event(Context *callback);
/* Cancel all events.
* Call with the event_lock LOCKED
*
* When this function returns, all events have been cancelled, and there are no
* more in progress.
*/
void cancel_all_events();
};
extern template class CommonSafeTimer<ceph::mutex>;
extern template class CommonSafeTimer<ceph::fair_mutex>;
using SafeTimer = class CommonSafeTimer<ceph::mutex>;
#endif
| 3,185 | 28.5 | 81 | h |
null | ceph-main/src/common/TracepointProvider.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TRACEPOINT_PROVIDER_H
#define CEPH_TRACEPOINT_PROVIDER_H
#include "common/ceph_context.h"
#include "common/config_obs.h"
#include "common/ceph_mutex.h"
#include "include/dlfcn_compat.h"
class TracepointProvider : public md_config_obs_t {
public:
struct Traits {
const char *library;
const char *config_key;
Traits(const char *library, const char *config_key)
: library(library), config_key(config_key) {
}
};
class Singleton {
public:
Singleton(CephContext *cct, const char *library, const char *config_key)
: tracepoint_provider(new TracepointProvider(cct, library, config_key)) {
}
~Singleton() {
delete tracepoint_provider;
}
inline bool is_enabled() const {
return tracepoint_provider->m_handle != nullptr;
}
private:
TracepointProvider *tracepoint_provider;
};
template <const Traits &traits>
class TypedSingleton : public Singleton {
public:
explicit TypedSingleton(CephContext *cct)
: Singleton(cct, traits.library, traits.config_key) {
}
};
TracepointProvider(CephContext *cct, const char *library,
const char *config_key);
~TracepointProvider() override;
TracepointProvider(const TracepointProvider&) = delete;
TracepointProvider operator =(const TracepointProvider&) = delete;
TracepointProvider(TracepointProvider&&) = delete;
TracepointProvider operator =(TracepointProvider&&) = delete;
template <const Traits &traits>
static void initialize(CephContext *cct) {
#ifdef WITH_LTTNG
cct->lookup_or_create_singleton_object<TypedSingleton<traits>>(
traits.library, false, cct);
#endif
}
protected:
const char** get_tracked_conf_keys() const override {
return m_config_keys;
}
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) override;
private:
CephContext *m_cct;
std::string m_library;
mutable const char* m_config_keys[2];
ceph::mutex m_lock = ceph::make_mutex("TracepointProvider::m_lock");
void* m_handle = nullptr;
void verify_config(const ConfigProxy& conf);
};
#endif // CEPH_TRACEPOINT_PROVIDER_H
| 2,258 | 26.216867 | 79 | h |
null | ceph-main/src/common/WeightedPriorityQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef WP_QUEUE_H
#define WP_QUEUE_H
#include "OpQueue.h"
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/rbtree.hpp>
#include <boost/intrusive/avl_set.hpp>
#include "include/ceph_assert.h"
namespace bi = boost::intrusive;
template <typename T, typename S>
class MapKey
{
public:
bool operator()(const S i, const T &k) const
{
return i < k.key;
}
bool operator()(const T &k, const S i) const
{
return k.key < i;
}
};
template <typename T>
class DelItem
{
public:
void operator()(T* delete_this)
{ delete delete_this; }
};
template <typename T, typename K>
class WeightedPriorityQueue : public OpQueue <T, K>
{
private:
class ListPair : public bi::list_base_hook<>
{
public:
unsigned cost;
T item;
ListPair(unsigned c, T&& i) :
cost(c),
item(std::move(i))
{}
};
class Klass : public bi::set_base_hook<>
{
typedef bi::list<ListPair> ListPairs;
typedef typename ListPairs::iterator Lit;
public:
K key; // klass
ListPairs lp;
Klass(K& k) :
key(k) {
}
~Klass() {
lp.clear_and_dispose(DelItem<ListPair>());
}
friend bool operator< (const Klass &a, const Klass &b)
{ return a.key < b.key; }
friend bool operator> (const Klass &a, const Klass &b)
{ return a.key > b.key; }
friend bool operator== (const Klass &a, const Klass &b)
{ return a.key == b.key; }
void insert(unsigned cost, T&& item, bool front) {
if (front) {
lp.push_front(*new ListPair(cost, std::move(item)));
} else {
lp.push_back(*new ListPair(cost, std::move(item)));
}
}
    // Get the cost of the next item to dequeue
unsigned get_cost() const {
ceph_assert(!empty());
return lp.begin()->cost;
}
T pop() {
ceph_assert(!lp.empty());
T ret = std::move(lp.begin()->item);
lp.erase_and_dispose(lp.begin(), DelItem<ListPair>());
return ret;
}
bool empty() const {
return lp.empty();
}
unsigned get_size() const {
return lp.size();
}
void filter_class(std::list<T>* out) {
for (Lit i = --lp.end();; --i) {
if (out) {
out->push_front(std::move(i->item));
}
i = lp.erase_and_dispose(i, DelItem<ListPair>());
if (i == lp.begin()) {
break;
}
}
}
};
class SubQueue : public bi::set_base_hook<>
{
typedef bi::rbtree<Klass> Klasses;
typedef typename Klasses::iterator Kit;
void check_end() {
if (next == klasses.end()) {
next = klasses.begin();
}
}
public:
unsigned key; // priority
Klasses klasses;
Kit next;
SubQueue(unsigned& p) :
key(p),
next(klasses.begin()) {
}
~SubQueue() {
klasses.clear_and_dispose(DelItem<Klass>());
}
friend bool operator< (const SubQueue &a, const SubQueue &b)
{ return a.key < b.key; }
friend bool operator> (const SubQueue &a, const SubQueue &b)
{ return a.key > b.key; }
friend bool operator== (const SubQueue &a, const SubQueue &b)
{ return a.key == b.key; }
bool empty() const {
return klasses.empty();
}
void insert(K cl, unsigned cost, T&& item, bool front = false) {
typename Klasses::insert_commit_data insert_data;
std::pair<Kit, bool> ret =
klasses.insert_unique_check(cl, MapKey<Klass, K>(), insert_data);
if (ret.second) {
ret.first = klasses.insert_unique_commit(*new Klass(cl), insert_data);
check_end();
}
ret.first->insert(cost, std::move(item), front);
}
unsigned get_cost() const {
ceph_assert(!empty());
return next->get_cost();
}
T pop() {
T ret = next->pop();
if (next->empty()) {
next = klasses.erase_and_dispose(next, DelItem<Klass>());
} else {
++next;
}
check_end();
return ret;
}
void filter_class(K& cl, std::list<T>* out) {
Kit i = klasses.find(cl, MapKey<Klass, K>());
if (i != klasses.end()) {
i->filter_class(out);
Kit tmp = klasses.erase_and_dispose(i, DelItem<Klass>());
if (next == i) {
next = tmp;
}
check_end();
}
}
    // this is intended for unit tests and should never be used on hot paths
unsigned get_size_slow() const {
unsigned count = 0;
for (const auto& klass : klasses) {
count += klass.get_size();
}
return count;
}
void dump(ceph::Formatter *f) const {
f->dump_int("num_keys", next->get_size());
if (!empty()) {
f->dump_int("first_item_cost", next->get_cost());
}
}
};
class Queue {
typedef bi::rbtree<SubQueue> SubQueues;
typedef typename SubQueues::iterator Sit;
SubQueues queues;
unsigned total_prio;
unsigned max_cost;
public:
Queue() :
total_prio(0),
max_cost(0) {
}
~Queue() {
queues.clear_and_dispose(DelItem<SubQueue>());
}
bool empty() const {
return queues.empty();
}
void insert(unsigned p, K cl, unsigned cost, T&& item, bool front = false) {
typename SubQueues::insert_commit_data insert_data;
std::pair<typename SubQueues::iterator, bool> ret =
queues.insert_unique_check(p, MapKey<SubQueue, unsigned>(), insert_data);
if (ret.second) {
ret.first = queues.insert_unique_commit(*new SubQueue(p), insert_data);
total_prio += p;
}
ret.first->insert(cl, cost, std::move(item), front);
if (cost > max_cost) {
max_cost = cost;
}
}
T pop(bool strict = false) {
Sit i = --queues.end();
if (strict) {
T ret = i->pop();
if (i->empty()) {
queues.erase_and_dispose(i, DelItem<SubQueue>());
}
return ret;
}
if (queues.size() > 1) {
while (true) {
// Pick a new priority out of the total priority.
unsigned prio = rand() % total_prio + 1;
unsigned tp = total_prio - i->key;
// Find the priority corresponding to the picked number.
// Subtract high priorities to low priorities until the picked number
// is more than the total and try to dequeue that priority.
// Reverse the direction from previous implementation because there is a higher
// chance of dequeuing a high priority op so spend less time spinning.
while (prio <= tp) {
--i;
tp -= i->key;
}
// Flip a coin to see if this priority gets to run based on cost.
// The next op's cost is multiplied by .9 and subtracted from the
// max cost seen. Ops with lower costs will have a larger value
// and allow them to be selected easier than ops with high costs.
if (max_cost == 0 || rand() % max_cost <=
(max_cost - ((i->get_cost() * 9) / 10))) {
break;
}
i = --queues.end();
}
}
T ret = i->pop();
if (i->empty()) {
total_prio -= i->key;
queues.erase_and_dispose(i, DelItem<SubQueue>());
}
return ret;
}
void filter_class(K& cl, std::list<T>* out) {
for (Sit i = queues.begin(); i != queues.end();) {
i->filter_class(cl, out);
if (i->empty()) {
total_prio -= i->key;
i = queues.erase_and_dispose(i, DelItem<SubQueue>());
} else {
++i;
}
}
}
    // this is intended for unit tests and should never be used on hot paths
unsigned get_size_slow() const {
unsigned count = 0;
for (const auto& queue : queues) {
count += queue.get_size_slow();
}
return count;
}
void dump(ceph::Formatter *f) const {
for (typename SubQueues::const_iterator i = queues.begin();
i != queues.end(); ++i) {
f->dump_int("total_priority", total_prio);
f->dump_int("max_cost", max_cost);
f->open_object_section("subqueue");
f->dump_int("priority", i->key);
i->dump(f);
f->close_section();
}
}
};
Queue strict;
Queue normal;
public:
WeightedPriorityQueue(unsigned max_per, unsigned min_c) :
strict(),
normal()
{
std::srand(time(0));
}
void remove_by_class(K cl, std::list<T>* removed = 0) final {
strict.filter_class(cl, removed);
normal.filter_class(cl, removed);
}
bool empty() const final {
return strict.empty() && normal.empty();
}
void enqueue_strict(K cl, unsigned p, T&& item) final {
strict.insert(p, cl, 0, std::move(item));
}
void enqueue_strict_front(K cl, unsigned p, T&& item) final {
strict.insert(p, cl, 0, std::move(item), true);
}
void enqueue(K cl, unsigned p, unsigned cost, T&& item) final {
normal.insert(p, cl, cost, std::move(item));
}
void enqueue_front(K cl, unsigned p, unsigned cost, T&& item) final {
normal.insert(p, cl, cost, std::move(item), true);
}
T dequeue() override {
ceph_assert(!empty());
if (!strict.empty()) {
return strict.pop(true);
}
return normal.pop();
}
unsigned get_size_slow() {
return strict.get_size_slow() + normal.get_size_slow();
}
void dump(ceph::Formatter *f) const override {
f->open_array_section("high_queues");
strict.dump(f);
f->close_section();
f->open_array_section("queues");
normal.dump(f);
f->close_section();
}
void print(std::ostream &ostream) const final {
ostream << "WeightedPriorityQueue";
}
};
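// Illustrative usage sketch (not part of the original interface): the class
// keys, priorities and costs below are arbitrary examples. Strict items bypass
// the weighted random selection implemented in Queue::pop() above.
inline void wpq_example_usage()
{
  WeightedPriorityQueue<int, int> q(0, 0); // ctor arguments are unused hints
  q.enqueue(/*class*/ 1, /*priority*/ 10, /*cost*/ 100, 7);
  q.enqueue(/*class*/ 1, /*priority*/ 63, /*cost*/ 10, 8);
  q.enqueue_strict(/*class*/ 2, /*priority*/ 255, 9); // dequeued before normal items
  while (!q.empty()) {
    int item = q.dequeue();
    (void)item; // a real caller would process the op here
  }
}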
#endif
| 10,003 | 27.259887 | 86 | h |
null | ceph-main/src/common/WorkQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_WORKQUEUE_H
#define CEPH_WORKQUEUE_H
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
// for ObjectStore.h
struct ThreadPool {
struct TPHandle {
};
};
#else
#include <atomic>
#include <list>
#include <set>
#include <string>
#include <vector>
#include "common/ceph_mutex.h"
#include "include/unordered_map.h"
#include "common/config_obs.h"
#include "common/HeartbeatMap.h"
#include "common/Thread.h"
#include "include/common_fwd.h"
#include "include/Context.h"
#include "common/HBHandle.h"
/// Pool of threads that share work submitted to multiple work queues.
class ThreadPool : public md_config_obs_t {
protected:
CephContext *cct;
std::string name;
std::string thread_name;
std::string lockname;
ceph::mutex _lock;
ceph::condition_variable _cond;
bool _stop;
int _pause;
int _draining;
ceph::condition_variable _wait_cond;
public:
class TPHandle : public HBHandle {
friend class ThreadPool;
CephContext *cct;
ceph::heartbeat_handle_d *hb;
ceph::timespan grace;
ceph::timespan suicide_grace;
public:
TPHandle(
CephContext *cct,
ceph::heartbeat_handle_d *hb,
ceph::timespan grace,
ceph::timespan suicide_grace)
: cct(cct), hb(hb), grace(grace), suicide_grace(suicide_grace) {}
void reset_tp_timeout() override final;
void suspend_tp_timeout() override final;
};
protected:
/// Basic interface to a work queue used by the worker threads.
struct WorkQueue_ {
std::string name;
std::atomic<ceph::timespan> timeout_interval = ceph::timespan::zero();
std::atomic<ceph::timespan> suicide_interval = ceph::timespan::zero();
WorkQueue_(std::string n, ceph::timespan ti, ceph::timespan sti)
: name(std::move(n)), timeout_interval(ti), suicide_interval(sti)
{ }
virtual ~WorkQueue_() {}
/// Remove all work items from the queue.
virtual void _clear() = 0;
/// Check whether there is anything to do.
virtual bool _empty() = 0;
/// Get the next work item to process.
virtual void *_void_dequeue() = 0;
/** @brief Process the work item.
* This function will be called several times in parallel
* and must therefore be thread-safe. */
virtual void _void_process(void *item, TPHandle &handle) = 0;
/** @brief Synchronously finish processing a work item.
* This function is called after _void_process with the global thread pool lock held,
* so at most one copy will execute simultaneously for a given thread pool.
* It can be used for non-thread-safe finalization. */
virtual void _void_process_finish(void *) = 0;
void set_timeout(time_t ti){
timeout_interval.store(ceph::make_timespan(ti));
}
void set_suicide_timeout(time_t sti){
suicide_interval.store(ceph::make_timespan(sti));
}
};
// track thread pool size changes
unsigned _num_threads;
std::string _thread_num_option;
const char **_conf_keys;
const char **get_tracked_conf_keys() const override {
return _conf_keys;
}
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) override;
public:
/** @brief Templated by-value work queue.
* Skeleton implementation of a queue that processes items submitted by value.
* This is useful if the items are single primitive values or very small objects
* (a few bytes). The queue will automatically add itself to the thread pool on
* construction and remove itself on destruction. */
template<typename T, typename U = T>
class WorkQueueVal : public WorkQueue_ {
ceph::mutex _lock = ceph::make_mutex("WorkQueueVal::_lock");
ThreadPool *pool;
std::list<U> to_process;
std::list<U> to_finish;
virtual void _enqueue(T) = 0;
virtual void _enqueue_front(T) = 0;
bool _empty() override = 0;
virtual U _dequeue() = 0;
virtual void _process_finish(U) {}
void *_void_dequeue() override {
{
std::lock_guard l(_lock);
if (_empty())
return 0;
U u = _dequeue();
to_process.push_back(u);
}
return ((void*)1); // Not used
}
void _void_process(void *, TPHandle &handle) override {
_lock.lock();
ceph_assert(!to_process.empty());
U u = to_process.front();
to_process.pop_front();
_lock.unlock();
_process(u, handle);
_lock.lock();
to_finish.push_back(u);
_lock.unlock();
}
void _void_process_finish(void *) override {
_lock.lock();
ceph_assert(!to_finish.empty());
U u = to_finish.front();
to_finish.pop_front();
_lock.unlock();
_process_finish(u);
}
void _clear() override {}
public:
WorkQueueVal(std::string n,
ceph::timespan ti,
ceph::timespan sti,
ThreadPool *p)
: WorkQueue_(std::move(n), ti, sti), pool(p) {
pool->add_work_queue(this);
}
~WorkQueueVal() override {
pool->remove_work_queue(this);
}
void queue(T item) {
std::lock_guard l(pool->_lock);
_enqueue(item);
pool->_cond.notify_one();
}
void queue_front(T item) {
std::lock_guard l(pool->_lock);
_enqueue_front(item);
pool->_cond.notify_one();
}
void drain() {
pool->drain(this);
}
protected:
void lock() {
pool->lock();
}
void unlock() {
pool->unlock();
}
virtual void _process(U u, TPHandle &) = 0;
};
  /** @brief Templated by-pointer work queue.
   * Skeleton implementation of a queue that processes items of a given type submitted as pointers.
   * This is useful when the work items are large or include dynamically allocated memory. The queue
   * will automatically add itself to the thread pool on construction and remove itself on
   * destruction. */
template<class T>
class WorkQueue : public WorkQueue_ {
ThreadPool *pool;
/// Add a work item to the queue.
virtual bool _enqueue(T *) = 0;
/// Dequeue a previously submitted work item.
virtual void _dequeue(T *) = 0;
/// Dequeue a work item and return the original submitted pointer.
virtual T *_dequeue() = 0;
virtual void _process_finish(T *) {}
// implementation of virtual methods from WorkQueue_
void *_void_dequeue() override {
return (void *)_dequeue();
}
void _void_process(void *p, TPHandle &handle) override {
_process(static_cast<T *>(p), handle);
}
void _void_process_finish(void *p) override {
_process_finish(static_cast<T *>(p));
}
protected:
/// Process a work item. Called from the worker threads.
virtual void _process(T *t, TPHandle &) = 0;
public:
WorkQueue(std::string n,
ceph::timespan ti, ceph::timespan sti,
ThreadPool* p)
: WorkQueue_(std::move(n), ti, sti), pool(p) {
pool->add_work_queue(this);
}
~WorkQueue() override {
pool->remove_work_queue(this);
}
bool queue(T *item) {
pool->_lock.lock();
bool r = _enqueue(item);
pool->_cond.notify_one();
pool->_lock.unlock();
return r;
}
void dequeue(T *item) {
pool->_lock.lock();
_dequeue(item);
pool->_lock.unlock();
}
void clear() {
pool->_lock.lock();
_clear();
pool->_lock.unlock();
}
void lock() {
pool->lock();
}
void unlock() {
pool->unlock();
}
/// wake up the thread pool (without lock held)
void wake() {
pool->wake();
}
/// wake up the thread pool (with lock already held)
void _wake() {
pool->_wake();
}
void _wait() {
pool->_wait();
}
void drain() {
pool->drain(this);
}
};
template<typename T>
class PointerWQ : public WorkQueue_ {
public:
~PointerWQ() override {
m_pool->remove_work_queue(this);
ceph_assert(m_processing == 0);
}
void drain() {
{
// if this queue is empty and not processing, don't wait for other
// queues to finish processing
std::lock_guard l(m_pool->_lock);
if (m_processing == 0 && m_items.empty()) {
return;
}
}
m_pool->drain(this);
}
void queue(T *item) {
std::lock_guard l(m_pool->_lock);
m_items.push_back(item);
m_pool->_cond.notify_one();
}
bool empty() {
std::lock_guard l(m_pool->_lock);
return _empty();
}
protected:
PointerWQ(std::string n,
ceph::timespan ti, ceph::timespan sti,
ThreadPool* p)
: WorkQueue_(std::move(n), ti, sti), m_pool(p), m_processing(0) {
}
void register_work_queue() {
m_pool->add_work_queue(this);
}
void _clear() override {
ceph_assert(ceph_mutex_is_locked(m_pool->_lock));
m_items.clear();
}
bool _empty() override {
ceph_assert(ceph_mutex_is_locked(m_pool->_lock));
return m_items.empty();
}
void *_void_dequeue() override {
ceph_assert(ceph_mutex_is_locked(m_pool->_lock));
if (m_items.empty()) {
return NULL;
}
++m_processing;
T *item = m_items.front();
m_items.pop_front();
return item;
}
void _void_process(void *item, ThreadPool::TPHandle &handle) override {
process(reinterpret_cast<T *>(item));
}
void _void_process_finish(void *item) override {
ceph_assert(ceph_mutex_is_locked(m_pool->_lock));
ceph_assert(m_processing > 0);
--m_processing;
}
virtual void process(T *item) = 0;
void process_finish() {
std::lock_guard locker(m_pool->_lock);
_void_process_finish(nullptr);
}
T *front() {
ceph_assert(ceph_mutex_is_locked(m_pool->_lock));
if (m_items.empty()) {
return NULL;
}
return m_items.front();
}
void requeue_front(T *item) {
std::lock_guard pool_locker(m_pool->_lock);
_void_process_finish(nullptr);
m_items.push_front(item);
}
void requeue_back(T *item) {
std::lock_guard pool_locker(m_pool->_lock);
_void_process_finish(nullptr);
m_items.push_back(item);
}
void signal() {
std::lock_guard pool_locker(m_pool->_lock);
m_pool->_cond.notify_one();
}
ceph::mutex &get_pool_lock() {
return m_pool->_lock;
}
private:
ThreadPool *m_pool;
std::list<T *> m_items;
uint32_t m_processing;
};
protected:
std::vector<WorkQueue_*> work_queues;
int next_work_queue = 0;
// threads
struct WorkThread : public Thread {
ThreadPool *pool;
// cppcheck-suppress noExplicitConstructor
WorkThread(ThreadPool *p) : pool(p) {}
void *entry() override {
pool->worker(this);
return 0;
}
};
std::set<WorkThread*> _threads;
std::list<WorkThread*> _old_threads; ///< need to be joined
int processing;
void start_threads();
void join_old_threads();
virtual void worker(WorkThread *wt);
public:
ThreadPool(CephContext *cct_, std::string nm, std::string tn, int n, const char *option = NULL);
~ThreadPool() override;
/// return number of threads currently running
int get_num_threads() {
std::lock_guard l(_lock);
return _num_threads;
}
/// assign a work queue to this thread pool
void add_work_queue(WorkQueue_* wq) {
std::lock_guard l(_lock);
work_queues.push_back(wq);
}
/// remove a work queue from this thread pool
void remove_work_queue(WorkQueue_* wq) {
std::lock_guard l(_lock);
unsigned i = 0;
while (work_queues[i] != wq)
i++;
for (i++; i < work_queues.size(); i++)
work_queues[i-1] = work_queues[i];
ceph_assert(i == work_queues.size());
work_queues.resize(i-1);
}
/// take thread pool lock
void lock() {
_lock.lock();
}
/// release thread pool lock
void unlock() {
_lock.unlock();
}
/// wait for a kick on this thread pool
void wait(ceph::condition_variable &c) {
std::unique_lock l(_lock, std::adopt_lock);
c.wait(l);
}
/// wake up a waiter (with lock already held)
void _wake() {
_cond.notify_all();
}
/// wake up a waiter (without lock held)
void wake() {
std::lock_guard l(_lock);
_cond.notify_all();
}
void _wait() {
std::unique_lock l(_lock, std::adopt_lock);
_cond.wait(l);
}
/// start thread pool thread
void start();
/// stop thread pool thread
void stop(bool clear_after=true);
  /// pause thread pool (if it is not already paused)
void pause();
/// pause initiation of new work
void pause_new();
/// resume work in thread pool. must match each pause() call 1:1 to resume.
void unpause();
/** @brief Wait until work completes.
* If the parameter is NULL, blocks until all threads are idle.
* If it is not NULL, blocks until the given work queue does not have
* any items left to process. */
void drain(WorkQueue_* wq = 0);
};
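// Illustrative sketch (not part of the original header): a minimal by-pointer
// queue built on ThreadPool::WorkQueue<T>, following the pattern documented in
// the class comments above. The job type and all names are examples only.
struct ExampleJob {
  int id = 0;
};
class ExampleJobWQ : public ThreadPool::WorkQueue<ExampleJob> {
  std::list<ExampleJob*> jobs; // manipulated with the pool lock held
public:
  explicit ExampleJobWQ(ThreadPool *tp)
    : ThreadPool::WorkQueue<ExampleJob>("ExampleJobWQ",
                                        ceph::make_timespan(60),
                                        ceph::timespan::zero(), tp) {}
  bool _enqueue(ExampleJob *job) override {
    jobs.push_back(job);
    return true;
  }
  void _dequeue(ExampleJob *job) override {
    jobs.remove(job);
  }
  ExampleJob *_dequeue() override {
    if (jobs.empty())
      return nullptr;
    ExampleJob *job = jobs.front();
    jobs.pop_front();
    return job;
  }
  bool _empty() override {
    return jobs.empty();
  }
  void _clear() override {
    jobs.clear();
  }
  void _process(ExampleJob *job, ThreadPool::TPHandle&) override {
    ++job->id; // stand-in for the real work; ownership stays with the caller
  }
};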
class GenContextWQ :
public ThreadPool::WorkQueueVal<GenContext<ThreadPool::TPHandle&>*> {
std::list<GenContext<ThreadPool::TPHandle&>*> _queue;
public:
GenContextWQ(const std::string &name, ceph::timespan ti, ThreadPool *tp)
: ThreadPool::WorkQueueVal<
GenContext<ThreadPool::TPHandle&>*>(name, ti, ti*10, tp) {}
void _enqueue(GenContext<ThreadPool::TPHandle&> *c) override {
_queue.push_back(c);
}
void _enqueue_front(GenContext<ThreadPool::TPHandle&> *c) override {
_queue.push_front(c);
}
bool _empty() override {
return _queue.empty();
}
GenContext<ThreadPool::TPHandle&> *_dequeue() override {
ceph_assert(!_queue.empty());
GenContext<ThreadPool::TPHandle&> *c = _queue.front();
_queue.pop_front();
return c;
}
void _process(GenContext<ThreadPool::TPHandle&> *c,
ThreadPool::TPHandle &tp) override {
c->complete(tp);
}
};
class C_QueueInWQ : public Context {
GenContextWQ *wq;
GenContext<ThreadPool::TPHandle&> *c;
public:
C_QueueInWQ(GenContextWQ *wq, GenContext<ThreadPool::TPHandle &> *c)
: wq(wq), c(c) {}
void finish(int) override {
wq->queue(c);
}
};
/// Work queue that asynchronously completes contexts (executes callbacks).
/// @see Finisher
class ContextWQ : public ThreadPool::PointerWQ<Context> {
public:
ContextWQ(const std::string &name, ceph::timespan ti, ThreadPool *tp)
: ThreadPool::PointerWQ<Context>(name, ti, ceph::timespan::zero(), tp) {
this->register_work_queue();
}
void queue(Context *ctx, int result = 0) {
if (result != 0) {
std::lock_guard locker(m_lock);
m_context_results[ctx] = result;
}
ThreadPool::PointerWQ<Context>::queue(ctx);
}
protected:
void _clear() override {
ThreadPool::PointerWQ<Context>::_clear();
std::lock_guard locker(m_lock);
m_context_results.clear();
}
void process(Context *ctx) override {
int result = 0;
{
std::lock_guard locker(m_lock);
ceph::unordered_map<Context *, int>::iterator it =
m_context_results.find(ctx);
if (it != m_context_results.end()) {
result = it->second;
m_context_results.erase(it);
}
}
ctx->complete(result);
}
private:
ceph::mutex m_lock = ceph::make_mutex("ContextWQ::m_lock");
ceph::unordered_map<Context*, int> m_context_results;
};
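// Illustrative usage sketch (assumed setup, not part of the original header):
//
//   ThreadPool tp(cct, "example_tp", "tp_thread", 4);
//   ContextWQ wq("example_wq", ceph::make_timespan(60), &tp);
//   tp.start();
//   wq.queue(on_finish, 0); // on_finish->complete(0) runs on a pool thread
//   wq.drain();
//   tp.stop();
//
// where 'on_finish' is a caller-provided Context*.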
class ShardedThreadPool {
CephContext *cct;
std::string name;
std::string thread_name;
std::string lockname;
ceph::mutex shardedpool_lock;
ceph::condition_variable shardedpool_cond;
ceph::condition_variable wait_cond;
uint32_t num_threads;
std::atomic<bool> stop_threads = { false };
std::atomic<bool> pause_threads = { false };
std::atomic<bool> drain_threads = { false };
uint32_t num_paused;
uint32_t num_drained;
public:
class BaseShardedWQ {
public:
std::atomic<ceph::timespan> timeout_interval = ceph::timespan::zero();
std::atomic<ceph::timespan> suicide_interval = ceph::timespan::zero();
BaseShardedWQ(ceph::timespan ti, ceph::timespan sti)
:timeout_interval(ti), suicide_interval(sti) {}
virtual ~BaseShardedWQ() {}
virtual void _process(uint32_t thread_index, ceph::heartbeat_handle_d *hb ) = 0;
virtual void return_waiting_threads() = 0;
virtual void stop_return_waiting_threads() = 0;
virtual bool is_shard_empty(uint32_t thread_index) = 0;
void set_timeout(time_t ti) {
timeout_interval.store(ceph::make_timespan(ti));
}
void set_suicide_timeout(time_t sti) {
suicide_interval.store(ceph::make_timespan(sti));
}
};
template <typename T>
class ShardedWQ: public BaseShardedWQ {
ShardedThreadPool* sharded_pool;
protected:
virtual void _enqueue(T&&) = 0;
virtual void _enqueue_front(T&&) = 0;
public:
ShardedWQ(ceph::timespan ti,
ceph::timespan sti, ShardedThreadPool* tp)
: BaseShardedWQ(ti, sti), sharded_pool(tp) {
tp->set_wq(this);
}
~ShardedWQ() override {}
void queue(T&& item) {
_enqueue(std::move(item));
}
void queue_front(T&& item) {
_enqueue_front(std::move(item));
}
void drain() {
sharded_pool->drain();
}
};
private:
BaseShardedWQ* wq;
// threads
struct WorkThreadSharded : public Thread {
ShardedThreadPool *pool;
uint32_t thread_index;
WorkThreadSharded(ShardedThreadPool *p, uint32_t pthread_index): pool(p),
thread_index(pthread_index) {}
void *entry() override {
pool->shardedthreadpool_worker(thread_index);
return 0;
}
};
std::vector<WorkThreadSharded*> threads_shardedpool;
void start_threads();
void shardedthreadpool_worker(uint32_t thread_index);
void set_wq(BaseShardedWQ* swq) {
wq = swq;
}
public:
ShardedThreadPool(CephContext *cct_, std::string nm, std::string tn, uint32_t pnum_threads);
~ShardedThreadPool(){};
/// start thread pool thread
void start();
/// stop thread pool thread
void stop();
  /// pause thread pool (if it is not already paused)
void pause();
/// pause initiation of new work
void pause_new();
/// resume work in thread pool. must match each pause() call 1:1 to resume.
void unpause();
/// wait for all work to complete
void drain();
};
#endif
#endif
| 18,509 | 25.90407 | 99 | h |
null | ceph-main/src/common/addr_parsing.c | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(__FreeBSD__) || defined(_AIX)
#include <sys/socket.h>
#include <netinet/in.h>
#endif
#include <netdb.h>
#define BUF_SIZE 128
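/* ROUND_UP_128(x) rounds x up to the next multiple of 128: in two's complement
 * -128 has the low seven bits clear, so (-(-(x) & -128)) == ((x + 127) & ~127)
 * for non-negative x. */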
#define ROUND_UP_128(x) (-(-(x) & -128))
int safe_cat(char **pstr, int *plen, int pos, const char *src)
{
size_t len2 = strlen(src);
size_t new_size = pos + len2 + 1;
  if ((size_t)*plen < new_size) {
    size_t round_up = ROUND_UP_128(new_size);
    void* p = realloc(*pstr, round_up);
    if (!p) {
      printf("Out of memory\n");
      exit(1);
    } else {
      *pstr = p;
      *plen = (int)round_up;  /* remember the grown capacity */
    }
  }
memcpy(*pstr + pos, src, len2 + 1);
return pos + len2;
}
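/*
 * resolve_addrs: take a comma/semicolon/space separated list of names such as
 * "mon-a:6789, mon-b" and return a newly allocated string in which every name
 * is replaced by its numeric address(es), e.g. "10.0.0.1:6789,[2001:db8::1]:6789".
 * IPv6 addresses are always wrapped in brackets. Returns NULL on failure; the
 * caller must free() the result.
 */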
char *resolve_addrs(const char *orig_str)
{
int len = BUF_SIZE;
char *new_str = (char *)malloc(len);
if (!new_str) {
return NULL;
}
char *saveptr = NULL;
char *buf = strdup(orig_str);
const char *delim = ",; ";
char *tok = strtok_r(buf, delim, &saveptr);
int pos = 0;
while (tok) {
struct addrinfo hint;
struct addrinfo *res, *ores;
char *firstcolon, *lastcolon, *bracecolon;
int r;
int brackets = 0;
firstcolon = strchr(tok, ':');
lastcolon = strrchr(tok, ':');
bracecolon = strstr(tok, "]:");
char *port_str = 0;
if (firstcolon && firstcolon == lastcolon) {
/* host:port or a.b.c.d:port */
*firstcolon = 0;
port_str = firstcolon + 1;
} else if (bracecolon) {
/* [ipv6addr]:port */
port_str = bracecolon + 1;
*port_str = 0;
port_str++;
}
if (port_str && !*port_str)
port_str = NULL;
if (*tok == '[' &&
tok[strlen(tok)-1] == ']') {
tok[strlen(tok)-1] = 0;
tok++;
brackets = 1;
}
//printf("name '%s' port '%s'\n", tok, port_str);
// FIPS zeroization audit 20191115: this memset is fine.
memset(&hint, 0, sizeof(hint));
hint.ai_family = AF_UNSPEC;
hint.ai_socktype = SOCK_STREAM;
hint.ai_protocol = IPPROTO_TCP;
r = getaddrinfo(tok, port_str, &hint, &res);
    if (r != 0) {  /* getaddrinfo returns a non-zero EAI_* code on failure */
printf("server name not found: %s (%s)\n", tok,
gai_strerror(r));
free(new_str);
free(buf);
return 0;
}
/* build resolved addr list */
ores = res;
while (res) {
char host[40], port[40];
getnameinfo(res->ai_addr, res->ai_addrlen,
host, sizeof(host),
port, sizeof(port),
NI_NUMERICSERV | NI_NUMERICHOST);
/*printf(" host %s port %s flags %d family %d socktype %d proto %d sanonname %s\n",
host, port,
res->ai_flags, res->ai_family, res->ai_socktype, res->ai_protocol,
res->ai_canonname);*/
if (res->ai_family == AF_INET6)
brackets = 1; /* always surround ipv6 addrs with brackets */
if (brackets)
pos = safe_cat(&new_str, &len, pos, "[");
pos = safe_cat(&new_str, &len, pos, host);
if (brackets)
pos = safe_cat(&new_str, &len, pos, "]");
if (port_str) {
pos = safe_cat(&new_str, &len, pos, ":");
pos = safe_cat(&new_str, &len, pos, port);
}
res = res->ai_next;
if (res)
pos = safe_cat(&new_str, &len, pos, ",");
}
freeaddrinfo(ores);
tok = strtok_r(NULL, delim, &saveptr);
if (tok)
pos = safe_cat(&new_str, &len, pos, ",");
}
//printf("new_str is '%s'\n", new_str);
free(buf);
return new_str;
}
| 3,678 | 23.526667 | 89 | c |
null | ceph-main/src/common/admin_socket.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_ADMIN_SOCKET_H
#define CEPH_COMMON_ADMIN_SOCKET_H
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#include "crimson/admin/admin_socket.h"
#else
#include <condition_variable>
#include <mutex>
#include <string>
#include <string_view>
#include <thread>
#include "include/buffer.h"
#include "include/common_fwd.h"
#include "common/ref.h"
#include "common/cmdparse.h"
class MCommand;
class MMonCommand;
inline constexpr auto CEPH_ADMIN_SOCK_VERSION = std::string_view("2");
class AdminSocketHook {
public:
/**
* @brief
* Handler for admin socket commands, synchronous version
*
* Executes action associated with admin command and returns byte-stream output @c out.
* There is no restriction on output. Each handler defines output semantics.
* Typically output is textual representation of some ceph's internals.
* Handler should use provided formatter @c f if structuralized output is being produced.
*
* @param command[in] String matching constant part of cmddesc in @ref AdminSocket::register_command
* @param cmdmap[in] Parameters extracted from argument part of cmddesc in @ref AdminSocket::register_command
* @param f[in] Formatter created according to requestor preference, used by `ceph --format`
* @param errss[out] Error stream, should contain details in case of execution failure
* @param out[out] Produced output
*
* @retval 0 Success, errss is ignored and does not contribute to output
* @retval <0 Error code, errss is prepended to @c out
*
* @note If @c out is empty, then admin socket will try to flush @c f to out.
*/
virtual int call(
std::string_view command,
const cmdmap_t& cmdmap,
const ceph::buffer::list& inbl,
ceph::Formatter *f,
std::ostream& errss,
ceph::buffer::list& out) = 0;
/**
* @brief
* Handler for admin socket commands, asynchronous version
*
* Executes action associated with admin command and prepares byte-stream response.
* When processing is done @c on_finish must be called.
   * There is no restriction on output. Each handler defines its own output semantics.
* Typically output is textual representation of some ceph's internals.
* Input @c inbl can be passed, see ceph --in-file.
   * Handler should use the provided formatter @c f if structured output is being produced.
* on_finish handler has following parameters:
* - result code of handler (same as @ref AdminSocketHook::call)
* - error message, text
* - output
*
* @param[in] command String matching constant part of cmddesc in @ref AdminSocket::register_command
* @param[in] cmdmap Parameters extracted from argument part of cmddesc in @ref AdminSocket::register_command
* @param[in] f Formatter created according to requestor preference, used by `ceph --format`
* @param[in] inbl Input content for handler
* @param[in] on_finish Function to call when processing is done
*
* @note If @c out is empty, then admin socket will try to flush @c f to out.
*/
virtual void call_async(
std::string_view command,
const cmdmap_t& cmdmap,
ceph::Formatter *f,
const ceph::buffer::list& inbl,
std::function<void(int,const std::string&,ceph::buffer::list&)> on_finish) {
// by default, call the synchronous handler and then finish
ceph::buffer::list out;
std::ostringstream errss;
int r = call(command, cmdmap, inbl, f, errss, out);
on_finish(r, errss.str(), out);
}
virtual ~AdminSocketHook() {}
};
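// Illustrative sketch (not part of the original header): a minimal synchronous
// hook. The command name and output are examples only; a daemon would register
// it via AdminSocket::register_command("example status", &hook, "show example status").
class ExampleStatusHook : public AdminSocketHook {
  int call(std::string_view, const cmdmap_t&, const ceph::buffer::list&,
           ceph::Formatter*, std::ostream&, ceph::buffer::list& out) override {
    out.append("ok\n", 3); // plain text; structured output would go through the formatter
    return 0;              // success: 'out' is returned to the requestor as-is
  }
};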
class AdminSocket
{
public:
AdminSocket(CephContext *cct);
~AdminSocket();
AdminSocket(const AdminSocket&) = delete;
AdminSocket& operator =(const AdminSocket&) = delete;
AdminSocket(AdminSocket&&) = delete;
AdminSocket& operator =(AdminSocket&&) = delete;
/**
* register an admin socket command
*
* The command is registered under a command string. Incoming
* commands are split by space and matched against the longest
* registered command. For example, if 'foo' and 'foo bar' are
* registered, and an incoming command is 'foo bar baz', it is
* matched with 'foo bar', while 'foo fud' will match 'foo'.
*
* The entire incoming command string is passed to the registered
* hook.
*
* @param command command string
* @param cmddesc command syntax descriptor
* @param hook implementation
* @param help help text. if empty, command will not be included in 'help' output.
*
* @return 0 for success, -EEXIST if command already registered.
*/
int register_command(std::string_view cmddesc,
AdminSocketHook *hook,
std::string_view help);
/*
   * unregister all commands belonging to the hook.
*/
void unregister_commands(const AdminSocketHook *hook);
bool init(const std::string& path);
void chown(uid_t uid, gid_t gid);
void chmod(mode_t mode);
/// execute (async)
void execute_command(
const std::vector<std::string>& cmd,
const ceph::buffer::list& inbl,
std::function<void(int,const std::string&,ceph::buffer::list&)> on_fin);
/// execute (blocking)
int execute_command(
const std::vector<std::string>& cmd,
const ceph::buffer::list& inbl,
std::ostream& errss,
ceph::buffer::list *outbl);
void queue_tell_command(ceph::cref_t<MCommand> m);
void queue_tell_command(ceph::cref_t<MMonCommand> m); // for compat
private:
void shutdown();
void wakeup();
std::string create_wakeup_pipe(int *pipe_rd, int *pipe_wr);
std::string destroy_wakeup_pipe();
std::string bind_and_listen(const std::string &sock_path, int *fd);
std::thread th;
void entry() noexcept;
void do_accept();
void do_tell_queue();
CephContext *m_cct;
std::string m_path;
int m_sock_fd = -1;
int m_wakeup_rd_fd = -1;
int m_wakeup_wr_fd = -1;
bool m_shutdown = false;
bool in_hook = false;
std::condition_variable in_hook_cond;
std::mutex lock; // protects `hooks`
std::unique_ptr<AdminSocketHook> version_hook;
std::unique_ptr<AdminSocketHook> help_hook;
std::unique_ptr<AdminSocketHook> getdescs_hook;
std::mutex tell_lock;
std::list<ceph::cref_t<MCommand>> tell_queue;
std::list<ceph::cref_t<MMonCommand>> tell_legacy_queue;
struct hook_info {
AdminSocketHook* hook;
std::string desc;
std::string help;
hook_info(AdminSocketHook* hook, std::string_view desc,
std::string_view help)
: hook(hook), desc(desc), help(help) {}
};
/// find the first hook which matches the given prefix and cmdmap
std::pair<int, AdminSocketHook*> find_matched_hook(
std::string& prefix,
const cmdmap_t& cmdmap);
std::multimap<std::string, hook_info, std::less<>> hooks;
friend class AdminSocketTest;
friend class HelpHook;
friend class GetdescsHook;
};
#endif
#endif
| 7,193 | 31.405405 | 112 | h |
null | ceph-main/src/common/admin_socket_client.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_ADMIN_SOCKET_CLIENT_H
#define CEPH_COMMON_ADMIN_SOCKET_CLIENT_H
#include <string>
/* This is a simple client that talks to an AdminSocket using blocking I/O.
* We put a 5-second timeout on send and recv operations.
*/
class AdminSocketClient
{
public:
AdminSocketClient(const std::string &path);
std::string do_request(std::string request, std::string *result);
std::string ping(bool *ok);
private:
std::string m_path;
};
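// Illustrative usage sketch (socket path and command are examples only):
//
//   AdminSocketClient client("/var/run/ceph/ceph-osd.0.asok");
//   std::string response;
//   std::string err = client.do_request("{\"prefix\": \"help\"}", &response);
//   // 'err' is empty on success and 'response' holds the daemon's JSON reply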
const char* get_rand_socket_path();
#endif
| 908 | 24.25 | 75 | h |
null | ceph-main/src/common/allocate_unique.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <memory>
namespace ceph {
/// An allocator-aware 'Deleter' for std::unique_ptr<T, Deleter>. The
/// allocator's traits must have a value_type of T.
template <typename Alloc>
class deallocator {
using allocator_type = Alloc;
using allocator_traits = std::allocator_traits<allocator_type>;
using pointer = typename allocator_traits::pointer;
allocator_type alloc;
public:
explicit deallocator(const allocator_type& alloc) noexcept : alloc(alloc) {}
void operator()(pointer p) {
allocator_traits::destroy(alloc, p);
allocator_traits::deallocate(alloc, p, 1);
}
};
/// deallocator alias that rebinds Alloc's value_type to T
template <typename T, typename Alloc>
using deallocator_t = deallocator<typename std::allocator_traits<Alloc>
::template rebind_alloc<T>>;
/// std::unique_ptr alias that rebinds Alloc if necessary, and avoids repetition
/// of the template parameter T.
template <typename T, typename Alloc>
using allocated_unique_ptr = std::unique_ptr<T, deallocator_t<T, Alloc>>;
/// Returns a std::unique_ptr whose memory is managed by the given allocator.
template <typename T, typename Alloc, typename... Args>
static auto allocate_unique(Alloc& alloc, Args&&... args)
-> allocated_unique_ptr<T, Alloc>
{
static_assert(!std::is_array_v<T>, "allocate_unique() does not support T[]");
using allocator_type = typename std::allocator_traits<Alloc>
::template rebind_alloc<T>;
using allocator_traits = std::allocator_traits<allocator_type>;
auto a = allocator_type{alloc};
auto p = allocator_traits::allocate(a, 1);
try {
allocator_traits::construct(a, p, std::forward<Args>(args)...);
return {p, deallocator<allocator_type>{a}};
} catch (...) {
allocator_traits::deallocate(a, p, 1);
throw;
}
}
} // namespace ceph
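// Illustrative usage sketch (not part of the original header): any allocator
// with a value_type works; std::allocator keeps the example self-contained.
inline void allocate_unique_example()
{
  std::allocator<int> alloc;
  auto p = ceph::allocate_unique<int>(alloc, 42);
  // p is a std::unique_ptr<int, ceph::deallocator<std::allocator<int>>>; when
  // it goes out of scope *p is destroyed and its memory returned via 'alloc'.
}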
| 2,217 | 30.685714 | 80 | h |
null | ceph-main/src/common/autovector.h | // Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
#ifndef CEPH_AUTOVECTOR_H
#define CEPH_AUTOVECTOR_H
#include <algorithm>
#include <cassert>
#include <initializer_list>
#include <iterator>
#include <stdexcept>
#include <vector>
#include "include/ceph_assert.h"
// A vector that leverages a pre-allocated stack-based array to achieve better
// performance for arrays with a small number of items.
//
// The interface resembles that of vector, but with less features since we aim
// to solve the problem that we have in hand, rather than implementing a
// full-fledged generic container.
//
// Currently we don't support:
// * reserve()/shrink_to_fit()
// If used correctly, in most cases, people should not touch the
// underlying vector at all.
// * random insert()/erase(), please only use push_back()/pop_back().
// * No move/swap operations. Each autovector instance has a
// stack-allocated array and if we want support move/swap operations, we
// need to copy the arrays other than just swapping the pointers. In this
// case we'll just explicitly forbid these operations since they may
// lead users to make false assumption by thinking they are inexpensive
// operations.
//
// Naming style of public methods almost follows that of the STL's.
namespace ceph {
template <class T, size_t kSize = 8>
class autovector {
public:
// General STL-style container member types.
typedef T value_type;
typedef typename std::vector<T>::difference_type difference_type;
typedef typename std::vector<T>::size_type size_type;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef value_type* pointer;
typedef const value_type* const_pointer;
// This class is the base for regular/const iterator
template <class TAutoVector, class TValueType>
class iterator_impl {
public:
// -- iterator traits
typedef iterator_impl<TAutoVector, TValueType> self_type;
typedef TValueType value_type;
typedef TValueType& reference;
typedef TValueType* pointer;
typedef typename TAutoVector::difference_type difference_type;
typedef std::random_access_iterator_tag iterator_category;
iterator_impl(TAutoVector* vect, size_t index)
: vect_(vect), index_(index) {};
iterator_impl(const iterator_impl&) = default;
~iterator_impl() {}
iterator_impl& operator=(const iterator_impl&) = default;
// -- Advancement
// ++iterator
self_type& operator++() {
++index_;
return *this;
}
// iterator++
self_type operator++(int) {
auto old = *this;
++index_;
return old;
}
// --iterator
self_type& operator--() {
--index_;
return *this;
}
// iterator--
self_type operator--(int) {
auto old = *this;
--index_;
return old;
}
self_type operator-(difference_type len) const {
return self_type(vect_, index_ - len);
}
difference_type operator-(const self_type& other) const {
ceph_assert(vect_ == other.vect_);
return index_ - other.index_;
}
self_type operator+(difference_type len) const {
return self_type(vect_, index_ + len);
}
self_type& operator+=(difference_type len) {
index_ += len;
return *this;
}
self_type& operator-=(difference_type len) {
index_ -= len;
return *this;
}
// -- Reference
reference operator*() {
ceph_assert(vect_->size() >= index_);
return (*vect_)[index_];
}
const_reference operator*() const {
ceph_assert(vect_->size() >= index_);
return (*vect_)[index_];
}
pointer operator->() {
ceph_assert(vect_->size() >= index_);
return &(*vect_)[index_];
}
const_pointer operator->() const {
ceph_assert(vect_->size() >= index_);
return &(*vect_)[index_];
}
// -- Logical Operators
bool operator==(const self_type& other) const {
ceph_assert(vect_ == other.vect_);
return index_ == other.index_;
}
bool operator!=(const self_type& other) const { return !(*this == other); }
bool operator>(const self_type& other) const {
ceph_assert(vect_ == other.vect_);
return index_ > other.index_;
}
bool operator<(const self_type& other) const {
ceph_assert(vect_ == other.vect_);
return index_ < other.index_;
}
bool operator>=(const self_type& other) const {
ceph_assert(vect_ == other.vect_);
return index_ >= other.index_;
}
bool operator<=(const self_type& other) const {
ceph_assert(vect_ == other.vect_);
return index_ <= other.index_;
}
private:
TAutoVector* vect_ = nullptr;
size_t index_ = 0;
};
typedef iterator_impl<autovector, value_type> iterator;
typedef iterator_impl<const autovector, const value_type> const_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
autovector() = default;
autovector(std::initializer_list<T> init_list) {
for (const T& item : init_list) {
push_back(item);
}
}
~autovector() = default;
// -- Immutable operations
// Indicate if all data resides in in-stack data structure.
bool only_in_stack() const {
// If no element was inserted at all, the vector's capacity will be `0`.
return vect_.capacity() == 0;
}
size_type size() const { return num_stack_items_ + vect_.size(); }
// resize does not guarantee anything about the contents of the newly
// available elements
void resize(size_type n) {
if (n > kSize) {
vect_.resize(n - kSize);
num_stack_items_ = kSize;
} else {
vect_.clear();
num_stack_items_ = n;
}
}
bool empty() const { return size() == 0; }
const_reference operator[](size_type n) const {
ceph_assert(n < size());
return n < kSize ? values_[n] : vect_[n - kSize];
}
reference operator[](size_type n) {
ceph_assert(n < size());
return n < kSize ? values_[n] : vect_[n - kSize];
}
const_reference at(size_type n) const {
ceph_assert(n < size());
return (*this)[n];
}
reference at(size_type n) {
ceph_assert(n < size());
return (*this)[n];
}
reference front() {
ceph_assert(!empty());
return *begin();
}
const_reference front() const {
ceph_assert(!empty());
return *begin();
}
reference back() {
ceph_assert(!empty());
return *(end() - 1);
}
const_reference back() const {
ceph_assert(!empty());
return *(end() - 1);
}
// -- Mutable Operations
void push_back(T&& item) {
if (num_stack_items_ < kSize) {
values_[num_stack_items_++] = std::move(item);
} else {
vect_.push_back(item);
}
}
void push_back(const T& item) {
if (num_stack_items_ < kSize) {
values_[num_stack_items_++] = item;
} else {
vect_.push_back(item);
}
}
template <class... Args>
void emplace_back(Args&&... args) {
push_back(value_type(args...));
}
void pop_back() {
ceph_assert(!empty());
if (!vect_.empty()) {
vect_.pop_back();
} else {
--num_stack_items_;
}
}
void clear() {
num_stack_items_ = 0;
vect_.clear();
}
// -- Copy and Assignment
autovector& assign(const autovector& other);
autovector(const autovector& other) { assign(other); }
autovector& operator=(const autovector& other) { return assign(other); }
// -- Iterator Operations
iterator begin() { return iterator(this, 0); }
const_iterator begin() const { return const_iterator(this, 0); }
iterator end() { return iterator(this, this->size()); }
const_iterator end() const { return const_iterator(this, this->size()); }
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
private:
size_type num_stack_items_ = 0; // current number of items
value_type values_[kSize]; // the first `kSize` items
// used only if there are more than `kSize` items.
std::vector<T> vect_;
};
template <class T, size_t kSize>
autovector<T, kSize>& autovector<T, kSize>::assign(const autovector& other) {
// copy the internal vector
vect_.assign(other.vect_.begin(), other.vect_.end());
// copy array
num_stack_items_ = other.num_stack_items_;
std::copy(other.values_, other.values_ + num_stack_items_, values_);
return *this;
}
} // namespace ceph
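// Illustrative usage sketch (not part of the original header): the first kSize
// elements live in the in-object array; only further growth touches the heap.
inline void autovector_example()
{
  ceph::autovector<int, 4> v; // room for 4 items without heap allocation
  for (int i = 0; i < 6; ++i) {
    v.push_back(i);           // the 5th and 6th items spill into std::vector
  }
  // at this point v.only_in_stack() is false and v.size() == 6
}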
#endif // CEPH_AUTOVECTOR_H
| 8,854 | 25.275964 | 79 | h |
null | ceph-main/src/common/bit_str.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_BIT_STR_H
#define CEPH_COMMON_BIT_STR_H
#include <cstdint>
#include <iosfwd>
#include <functional>
namespace ceph {
class Formatter;
}
extern void print_bit_str(
uint64_t bits,
std::ostream &out,
const std::function<const char*(uint64_t)> &func,
bool dump_bit_val = false);
extern void dump_bit_str(
uint64_t bits,
ceph::Formatter *f,
const std::function<const char*(uint64_t)> &func,
bool dump_bit_val = false);
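// Illustrative usage sketch (names are examples only): both helpers take the
// bit set to print and a caller-supplied mapping from an individual bit to a
// human readable name, e.g.
//
//   const char *example_bit_name(uint64_t bit); // returns "FOO", "BAR", ...
//   ...
//   print_bit_str(flags, out, example_bit_name); // textual output
//   dump_bit_str(flags, formatter, example_bit_name); // Formatter output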
#endif /* CEPH_COMMON_BIT_STR_H */
| 913 | 23.052632 | 70 | h |
null | ceph-main/src/common/blkdev.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_COMMON_BLKDEV_H
#define __CEPH_COMMON_BLKDEV_H
#include <set>
#include <map>
#include <string>
#include "json_spirit/json_spirit_value.h"
extern int get_device_by_path(const char *path, char* partition, char* device, size_t max);
extern std::string _decode_model_enc(const std::string& in); // helper, exported only so we can unit test
// get $vendor_$model_$serial style device id
extern std::string get_device_id(const std::string& devname,
std::string *err=0);
// get /dev/disk/by-path/... style device id that is stable for a disk slot across reboots etc
extern std::string get_device_path(const std::string& devname,
std::string *err=0);
// populate daemon metadata map with device info
extern void get_device_metadata(
const std::set<std::string>& devnames,
std::map<std::string,std::string> *pm,
std::map<std::string,std::string> *errs);
extern void get_dm_parents(const std::string& dev, std::set<std::string> *ls);
extern int block_device_get_metrics(const std::string& devname, int timeout,
json_spirit::mValue *result);
// do everything to translate a device to the raw physical devices that
// back it, including partitions -> wholedisks and dm -> constituent devices.
extern void get_raw_devices(const std::string& in,
std::set<std::string> *ls);
class BlkDev {
public:
BlkDev(int fd);
BlkDev(const std::string& devname);
/* GoogleMock requires a virtual destructor */
virtual ~BlkDev() {}
// from an fd
int discard(int64_t offset, int64_t len) const;
int get_size(int64_t *psize) const;
int get_devid(dev_t *id) const;
int partition(char* partition, size_t max) const;
// from a device (e.g., "sdb")
bool support_discard() const;
int get_optimal_io_size() const;
bool is_rotational() const;
int get_numa_node(int *node) const;
int dev(char *dev, size_t max) const;
int vendor(char *vendor, size_t max) const;
int model(char *model, size_t max) const;
int serial(char *serial, size_t max) const;
/* virtual for testing purposes */
virtual const char *sysfsdir() const;
virtual int wholedisk(char* device, size_t max) const;
int wholedisk(std::string *s) const {
char out[PATH_MAX] = {0};
int r = wholedisk(out, sizeof(out));
if (r < 0) {
return r;
}
*s = out;
return r;
}
protected:
int64_t get_int_property(const char* prop) const;
int64_t get_string_property(const char* prop, char *val,
size_t maxlen) const;
private:
int fd = -1;
std::string devname;
};
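// Illustrative usage sketch (not part of the original header; the device name
// is an example): query properties of a block device by kernel name.
inline bool blkdev_example()
{
  BlkDev bdev("sdb");
  std::string disk;
  int r = bdev.wholedisk(&disk);           // for "sdb1" this yields the parent, "sdb"
  return r >= 0 && !bdev.is_rotational();  // true for a resolvable flash device
}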
#endif
| 2,628 | 29.929412 | 106 | h |
null | ceph-main/src/common/bounded_key_counter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* Author: Casey Bodley <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef BOUNDED_KEY_COUNTER_H
#define BOUNDED_KEY_COUNTER_H
#include <algorithm>
#include <map>
#include <tuple>
#include <vector>
#include "include/ceph_assert.h"
/**
* BoundedKeyCounter
*
* A data structure that counts the number of times a given key is inserted,
* and can return the keys with the highest counters. The number of unique keys
* is bounded by the given constructor argument, meaning that new keys will be
* rejected if they would exceed this bound.
*
* It is optimized for use where insertion is frequent, but sorted listings are
* both infrequent and tend to request a small subset of the available keys.
*/
template <typename Key, typename Count>
class BoundedKeyCounter {
/// map type to associate keys with their counter values
using map_type = std::map<Key, Count>;
using value_type = typename map_type::value_type;
/// view type used for sorting key-value pairs by their counter value
using view_type = std::vector<const value_type*>;
/// maximum number of counters to store at once
const size_t bound;
/// map of counters, with a maximum size given by 'bound'
map_type counters;
/// storage for sorted key-value pairs
view_type sorted;
/// remembers how much of the range is actually sorted
typename view_type::iterator sorted_position;
/// invalidate view of sorted entries
void invalidate_sorted()
{
sorted_position = sorted.begin();
sorted.clear();
}
/// value_type comparison function for sorting in descending order
static bool value_greater(const value_type *lhs, const value_type *rhs)
{
return lhs->second > rhs->second;
}
/// map iterator that adapts value_type to value_type*
struct const_pointer_iterator : public map_type::const_iterator {
const_pointer_iterator(typename map_type::const_iterator i)
: map_type::const_iterator(i) {}
using value_type = typename map_type::const_iterator::value_type*;
using reference = const typename map_type::const_iterator::value_type*;
reference operator*() const {
return &map_type::const_iterator::operator*();
}
};
protected:
/// return the number of sorted entries. marked protected for unit testing
size_t get_num_sorted() const
{
using const_iterator = typename view_type::const_iterator;
return std::distance<const_iterator>(sorted.begin(), sorted_position);
}
public:
BoundedKeyCounter(size_t bound)
: bound(bound)
{
sorted.reserve(bound);
sorted_position = sorted.begin();
}
/// return the number of keys stored
size_t size() const noexcept { return counters.size(); }
/// return the maximum number of keys
size_t capacity() const noexcept { return bound; }
/// increment a counter for the given key and return its value. if the key was
/// not present, insert it. if the map is full, return 0
Count insert(const Key& key, Count n = 1)
{
typename map_type::iterator i;
if (counters.size() < bound) {
// insert new entries at count=0
bool inserted;
std::tie(i, inserted) = counters.emplace(key, 0);
if (inserted) {
sorted.push_back(&*i);
}
} else {
// when full, refuse to insert new entries
i = counters.find(key);
if (i == counters.end()) {
return 0;
}
}
i->second += n; // add to the counter
// update sorted position if necessary. use a binary search for the last
// element in the sorted range that's greater than this counter
sorted_position = std::lower_bound(sorted.begin(), sorted_position,
&*i, &value_greater);
return i->second;
}
/// remove the given key from the map of counters
void erase(const Key& key)
{
auto i = counters.find(key);
if (i == counters.end()) {
return;
}
// removing the sorted entry would require linear search; invalidate instead
invalidate_sorted();
counters.erase(i);
}
/// query the highest N key-value pairs sorted by counter value, passing each
/// in order to the given callback with arguments (Key, Count)
template <typename Callback>
void get_highest(size_t count, Callback&& cb)
{
if (sorted.empty()) {
// initialize the vector with pointers to all key-value pairs
sorted.assign(const_pointer_iterator{counters.cbegin()},
const_pointer_iterator{counters.cend()});
// entire range is unsorted
ceph_assert(sorted_position == sorted.begin());
}
const size_t sorted_count = get_num_sorted();
if (sorted_count < count) {
// move sorted_position to cover the requested number of entries
sorted_position = sorted.begin() + std::min(count, sorted.size());
// sort all entries in descending order up to the given position
std::partial_sort(sorted.begin(), sorted_position, sorted.end(),
&value_greater);
}
// return the requested range via callback
for (const auto& pair : sorted) {
if (count-- == 0) {
return;
}
cb(pair->first, pair->second);
}
}
/// remove all keys and counters and invalidate the sorted range
void clear()
{
invalidate_sorted();
counters.clear();
}
};
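// Illustrative usage sketch (not part of the original header): keys and counts
// below are arbitrary.
inline void bounded_key_counter_example()
{
  BoundedKeyCounter<int, int> counter(64); // new keys are rejected beyond 64
  counter.insert(7);     // count for key 7 becomes 1
  counter.insert(42, 3); // count for key 42 becomes 3
  counter.get_highest(2, [] (int key, int count) {
    // visited in descending order of count: (42, 3) then (7, 1)
  });
}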
#endif // BOUNDED_KEY_COUNTER_H
| 5,718 | 28.786458 | 80 | h |
null | ceph-main/src/common/buffer_instrumentation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/buffer.h"
#include "include/buffer_raw.h"
namespace ceph::buffer_instrumentation {
// this is nothing more than an intermediary for a class hierarchy which
// can placed between a user's custom raw and the `ceph::buffer::raw` to
// detect whether a given `ceph::buffer::ptr` instance wraps a particular
// raw's implementation (via `dynamic_cast` or `typeid`).
//
// users are supposed to define a marker type (e.g. `class my_marker{}`),
// allocate through `instrumented_raw<my_marker>` and later test a given ptr
// with `instrumented_bptr::is_raw_marked<my_marker>()`.
template <class MarkerT>
struct instrumented_raw : public ceph::buffer::raw {
using raw::raw;
};
struct instrumented_bptr : public ceph::buffer::ptr {
const ceph::buffer::raw* get_raw() const {
return _raw;
}
template <class MarkerT>
bool is_raw_marked() const {
return dynamic_cast<const instrumented_raw<MarkerT>*>(get_raw()) != nullptr;
}
};
} // namespace ceph::buffer_instrumentation
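// Illustrative usage sketch (marker and variable names are examples only):
//
//   class my_marker {};
//   // allocate buffers through instrumented_raw<my_marker> in the code under
//   // test, then check whether a given ceph::buffer::ptr 'bp' carries one:
//   const auto& ibp =
//     static_cast<const ceph::buffer_instrumentation::instrumented_bptr&>(bp);
//   bool marked = ibp.is_raw_marked<my_marker>();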
| 985 | 28.878788 | 80 | h |
null | ceph-main/src/common/buffer_seastar.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <seastar/core/temporary_buffer.hh>
#include "include/buffer.h"
#include "common/error_code.h"
namespace details {
template<bool is_const>
class buffer_iterator_impl {
public:
using pointer = std::conditional_t<is_const, const char*, char *>;
buffer_iterator_impl(pointer first, const char* last)
: pos(first), end_ptr(last)
{}
pointer get_pos_add(size_t n) {
auto r = pos;
pos += n;
if (pos > end_ptr) {
throw buffer::end_of_buffer{};
}
return r;
}
pointer get() const {
return pos;
}
protected:
pointer pos;
const char* end_ptr;
};
} // namespace details
class seastar_buffer_iterator : details::buffer_iterator_impl<false> {
using parent = details::buffer_iterator_impl<false>;
using temporary_buffer = seastar::temporary_buffer<char>;
public:
seastar_buffer_iterator(temporary_buffer& b)
: parent(b.get_write(), b.end()), buf(b)
{}
using parent::pointer;
using parent::get_pos_add;
using parent::get;
ceph::buffer::ptr get_ptr(size_t len);
private:
// keep the reference to buf around, so it can be "shared" by get_ptr()
temporary_buffer& buf;
};
class const_seastar_buffer_iterator : details::buffer_iterator_impl<true> {
using parent = details::buffer_iterator_impl<true>;
using temporary_buffer = seastar::temporary_buffer<char>;
public:
const_seastar_buffer_iterator(temporary_buffer& b)
: parent(b.get_write(), b.end())
{}
using parent::pointer;
using parent::get_pos_add;
using parent::get;
ceph::buffer::ptr get_ptr(size_t len);
};
| 1,651 | 25.222222 | 75 | h |
null | ceph-main/src/common/ceph_argparse.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2008-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ARGPARSE_H
#define CEPH_ARGPARSE_H
/*
* Ceph argument parsing library
*
* We probably should eventually replace this with something standard like popt.
* Until we do that, though, this file is the place for argv parsing
* stuff to live.
*/
#include <string>
#include <vector>
#include "common/entity_name.h"
#include "include/encoding.h"
/////////////////////// Types ///////////////////////
class CephInitParameters
{
public:
explicit CephInitParameters(uint32_t module_type_);
uint32_t module_type;
EntityName name;
bool no_config_file = false;
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(module_type, bl);
encode(name, bl);
encode(no_config_file, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(module_type, bl);
decode(name, bl);
decode(no_config_file, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(CephInitParameters)
/////////////////////// Functions ///////////////////////
extern void string_to_vec(std::vector<std::string>& args, std::string argstr);
extern void clear_g_str_vec();
extern void env_to_vec(std::vector<const char*>& args, const char *name=nullptr);
extern std::vector<const char*> argv_to_vec(int argc, const char* const * argv);
extern void vec_to_argv(const char *argv0, std::vector<const char*>& args,
int *argc, const char ***argv);
extern bool parse_ip_port_vec(const char *s, std::vector<entity_addrvec_t>& vec,
int type=0);
bool ceph_argparse_double_dash(std::vector<const char*> &args,
std::vector<const char*>::iterator &i);
bool ceph_argparse_flag(std::vector<const char*> &args,
std::vector<const char*>::iterator &i, ...);
bool ceph_argparse_witharg(std::vector<const char*> &args,
std::vector<const char*>::iterator &i, std::string *ret,
std::ostream &oss, ...);
bool ceph_argparse_witharg(std::vector<const char*> &args,
std::vector<const char*>::iterator &i, std::string *ret, ...);
template<class T>
bool ceph_argparse_witharg(std::vector<const char*> &args,
std::vector<const char*>::iterator &i, T *ret,
std::ostream &oss, ...);
bool ceph_argparse_binary_flag(std::vector<const char*> &args,
std::vector<const char*>::iterator &i, int *ret,
std::ostream *oss, ...);
extern CephInitParameters ceph_argparse_early_args
(std::vector<const char*>& args, uint32_t module_type,
std::string *cluster, std::string *conf_file_list);
extern bool ceph_argparse_need_usage(const std::vector<const char*>& args);
extern void generic_server_usage();
extern void generic_client_usage();
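// Illustrative parse loop (a sketch, not part of the original header). The
// option name below is hypothetical; the pattern is: walk the argument vector,
// let each helper consume what it recognises, and advance past anything else.
inline void example_parse(std::vector<const char*>& args, std::string *name_out)
{
  for (auto i = args.begin(); i != args.end(); ) {
    if (ceph_argparse_double_dash(args, i)) {
      break;                       // "--" ends option processing
    } else if (ceph_argparse_witharg(args, i, name_out,
                                     "--example-name", (char*)NULL)) {
      // matched: the value is now in *name_out and the arguments were consumed
    } else {
      ++i;                         // not ours, leave it for the caller
    }
  }
}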
#endif
| 3,045 | 31.752688 | 81 | h |
null | ceph-main/src/common/ceph_atomic.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <atomic>
// What and why
// ============
//
// ceph::atomic – thin wrapper to differentiate behavior of atomics.
//
// Not all users of the common library truly need costly atomic operations to
// synchronize data between CPUs and threads. Some, like crimson-osd,
// stick to a shared-nothing approach. Forcing atomic instructions in
// such cases is wasteful – on x86 any locked instruction actually acts
// as a full memory barrier, stalling execution until the CPU's store
// and load buffers are drained.
#if defined(WITH_SEASTAR) && !defined(WITH_BLUESTORE)
#include <type_traits>
namespace ceph {
template <class T>
class dummy_atomic {
T value;
public:
dummy_atomic() = default;
dummy_atomic(const dummy_atomic&) = delete;
dummy_atomic(T value) : value(std::move(value)) {
}
bool is_lock_free() const noexcept {
return true;
}
void store(T desired, std::memory_order) noexcept {
value = std::move(desired);
}
T load(std::memory_order = std::memory_order_seq_cst) const noexcept {
return value;
}
T operator=(T desired) noexcept {
value = std::move(desired);
return *this;
}
operator T() const noexcept {
return value;
}
// We need to differentiate with SFINAE as std::atomic offers beefier
// interface for integral types.
template<class TT=T>
std::enable_if_t<!std::is_enum_v<TT> && std::is_integral_v<TT>, TT> operator++() {
return ++value;
}
template<class TT=T>
std::enable_if_t<!std::is_enum_v<TT> && std::is_integral_v<TT>, TT> operator++(int) {
return value++;
}
template<class TT=T>
std::enable_if_t<!std::is_enum_v<TT> && std::is_integral_v<TT>, TT> operator--() {
return --value;
}
template<class TT=T>
std::enable_if_t<!std::is_enum_v<TT> && std::is_integral_v<TT>, TT> operator--(int) {
return value--;
}
template<class TT=T>
std::enable_if_t<!std::is_enum_v<TT> && std::is_integral_v<TT>, TT> operator+=(const dummy_atomic& b) {
value += b;
return value;
}
template<class TT=T>
std::enable_if_t<!std::is_enum_v<TT> && std::is_integral_v<TT>, TT> operator-=(const dummy_atomic& b) {
value -= b;
return value;
}
static constexpr bool is_always_lock_free = true;
};
template <class T> using atomic = dummy_atomic<T>;
} // namespace ceph
#else // WITH_SEASTAR
namespace ceph {
template <class T> using atomic = ::std::atomic<T>;
} // namespace ceph
#endif // WITH_SEASTAR
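// Illustrative usage sketch (hypothetical function, not part of the original
// header): call sites use ceph::atomic<T> identically with either branch
// above; under the crimson build it degenerates to a plain, non-atomic value.
inline unsigned example_count_event(ceph::atomic<unsigned>& counter)
{
  ++counter;
  return counter.load(std::memory_order_relaxed);
}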
| 2,650 | 27.202128 | 107 | h |
null | ceph-main/src/common/ceph_context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CEPHCONTEXT_H
#define CEPH_CEPHCONTEXT_H
#include <atomic>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <string_view>
#include <typeinfo>
#include <typeindex>
#include <boost/intrusive_ptr.hpp>
#include "include/any.h"
#include "include/common_fwd.h"
#include "include/compat.h"
#include "common/cmdparse.h"
#include "common/code_environment.h"
#include "msg/msg_types.h"
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#include "crimson/common/config_proxy.h"
#include "crimson/common/perf_counters_collection.h"
#else
#include "common/config_proxy.h"
#include "include/spinlock.h"
#include "common/perf_counters_collection.h"
#endif
#include "crush/CrushLocation.h"
class AdminSocket;
class CryptoHandler;
class CryptoRandom;
class MonMap;
namespace ceph::common {
class CephContextServiceThread;
class CephContextObs;
class CephContextHook;
}
namespace ceph {
class PluginRegistry;
class HeartbeatMap;
namespace logging {
class Log;
class SubsystemMap;
}
}
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
namespace crimson::common {
class CephContext {
public:
CephContext();
CephContext(uint32_t,
code_environment_t=CODE_ENVIRONMENT_UTILITY,
int = 0)
: CephContext{}
{}
CephContext(CephContext&&) = default;
~CephContext();
uint32_t get_module_type() const;
bool check_experimental_feature_enabled(const std::string& feature) {
// everything crimson is experimental...
return true;
}
ceph::PluginRegistry* get_plugin_registry() {
return _plugin_registry;
}
CryptoRandom* random() const;
PerfCountersCollectionImpl* get_perfcounters_collection();
crimson::common::ConfigProxy& _conf;
crimson::common::PerfCountersCollection& _perf_counters_collection;
CephContext* get();
void put();
private:
std::unique_ptr<CryptoRandom> _crypto_random;
unsigned nref;
ceph::PluginRegistry* _plugin_registry;
};
}
#else
#ifdef __cplusplus
namespace ceph::common {
#endif
/* A CephContext represents the context held by a single library user.
* There can be multiple CephContexts in the same process.
*
* For daemons and utility programs, there will be only one CephContext. The
* CephContext contains the configuration, the dout object, and anything else
* that you might want to pass to libcommon with every function call.
*/
class CephContext {
public:
CephContext(uint32_t module_type_,
enum code_environment_t code_env=CODE_ENVIRONMENT_UTILITY,
int init_flags_ = 0);
struct create_options {
enum code_environment_t code_env=CODE_ENVIRONMENT_UTILITY;
int init_flags = 0;
std::function<ceph::logging::Log* (const ceph::logging::SubsystemMap *)> create_log;
};
CephContext(uint32_t module_type_,
const create_options& options);
CephContext(const CephContext&) = delete;
CephContext& operator =(const CephContext&) = delete;
CephContext(CephContext&&) = delete;
CephContext& operator =(CephContext&&) = delete;
bool _finished = false;
~CephContext();
// ref count!
private:
std::atomic<unsigned> nref;
public:
CephContext *get() {
++nref;
return this;
}
void put();
ConfigProxy _conf;
ceph::logging::Log *_log;
/* init ceph::crypto */
void init_crypto();
/// shutdown crypto (should match init_crypto calls)
void shutdown_crypto();
/* Start the Ceph Context's service thread */
void start_service_thread();
/* Reopen the log files */
void reopen_logs();
/* Get the module type (client, mon, osd, mds, etc.) */
uint32_t get_module_type() const;
// this is here only for testing purposes!
void _set_module_type(uint32_t t) {
_module_type = t;
}
void set_init_flags(int flags);
int get_init_flags() const;
/* Get the PerfCountersCollection of this CephContext */
PerfCountersCollection *get_perfcounters_collection();
ceph::HeartbeatMap *get_heartbeat_map() {
return _heartbeat_map;
}
/**
* Get the admin socket associated with this CephContext.
*
* Currently there is always an admin socket object,
* so this will never return NULL.
*
* @return the admin socket
*/
AdminSocket *get_admin_socket();
/**
* process an admin socket command
*/
int do_command(std::string_view command, const cmdmap_t& cmdmap,
Formatter *f,
std::ostream& errss,
ceph::bufferlist *out);
int _do_command(std::string_view command, const cmdmap_t& cmdmap,
Formatter *f,
std::ostream& errss,
ceph::bufferlist *out);
static constexpr std::size_t largest_singleton = 8 * 72;
template<typename T, typename... Args>
T& lookup_or_create_singleton_object(std::string_view name,
bool drop_on_fork,
Args&&... args) {
static_assert(sizeof(T) <= largest_singleton,
"Please increase largest singleton.");
std::lock_guard lg(associated_objs_lock);
std::type_index type = typeid(T);
auto i = associated_objs.find(std::make_pair(name, type));
if (i == associated_objs.cend()) {
if (drop_on_fork) {
associated_objs_drop_on_fork.insert(std::string(name));
}
i = associated_objs.emplace_hint(
i,
std::piecewise_construct,
std::forward_as_tuple(name, type),
std::forward_as_tuple(std::in_place_type<T>,
std::forward<Args>(args)...));
}
return ceph::any_cast<T&>(i->second);
}
/**
* get a crypto handler
*/
CryptoHandler *get_crypto_handler(int type);
CryptoRandom* random() const { return _crypto_random.get(); }
/// check if an experimental feature is enabled, and emit appropriate warnings
bool check_experimental_feature_enabled(const std::string& feature);
bool check_experimental_feature_enabled(const std::string& feature,
std::ostream *message);
ceph::PluginRegistry *get_plugin_registry() {
return _plugin_registry;
}
void set_uid_gid(uid_t u, gid_t g) {
_set_uid = u;
_set_gid = g;
}
uid_t get_set_uid() const {
return _set_uid;
}
gid_t get_set_gid() const {
return _set_gid;
}
void set_uid_gid_strings(const std::string &u, const std::string &g) {
_set_uid_string = u;
_set_gid_string = g;
}
std::string get_set_uid_string() const {
return _set_uid_string;
}
std::string get_set_gid_string() const {
return _set_gid_string;
}
class ForkWatcher {
public:
virtual ~ForkWatcher() {}
virtual void handle_pre_fork() = 0;
virtual void handle_post_fork() = 0;
};
void register_fork_watcher(ForkWatcher *w) {
std::lock_guard lg(_fork_watchers_lock);
_fork_watchers.push_back(w);
}
void notify_pre_fork();
void notify_post_fork();
/**
* update CephContext with a copy of the passed in MonMap mon addrs
*
* @param mm MonMap to extract and update mon addrs
*/
void set_mon_addrs(const MonMap& mm);
void set_mon_addrs(const std::vector<entity_addrvec_t>& in) {
auto ptr = std::make_shared<std::vector<entity_addrvec_t>>(in);
atomic_store_explicit(&_mon_addrs, std::move(ptr), std::memory_order_relaxed);
}
std::shared_ptr<std::vector<entity_addrvec_t>> get_mon_addrs() const {
auto ptr = atomic_load_explicit(&_mon_addrs, std::memory_order_relaxed);
return ptr;
}
private:
/* Stop and join the Ceph Context's service thread */
void join_service_thread();
uint32_t _module_type;
int _init_flags;
uid_t _set_uid; ///< uid to drop privs to
gid_t _set_gid; ///< gid to drop privs to
std::string _set_uid_string;
std::string _set_gid_string;
int _crypto_inited;
std::shared_ptr<std::vector<entity_addrvec_t>> _mon_addrs;
/* libcommon service thread.
* SIGHUP wakes this thread, which then reopens logfiles */
friend class CephContextServiceThread;
CephContextServiceThread *_service_thread;
using md_config_obs_t = ceph::md_config_obs_impl<ConfigProxy>;
md_config_obs_t *_log_obs;
/* The admin socket associated with this context */
AdminSocket *_admin_socket;
/* lock which protects service thread creation, destruction, etc. */
ceph::spinlock _service_thread_lock;
/* The collection of profiling loggers associated with this context */
PerfCountersCollection *_perf_counters_collection;
md_config_obs_t *_perf_counters_conf_obs;
CephContextHook *_admin_hook;
ceph::HeartbeatMap *_heartbeat_map;
ceph::spinlock associated_objs_lock;
struct associated_objs_cmp {
using is_transparent = std::true_type;
template<typename T, typename U>
bool operator ()(const std::pair<T, std::type_index>& l,
const std::pair<U, std::type_index>& r) const noexcept {
return ((l.first < r.first) ||
(l.first == r.first && l.second < r.second));
}
};
std::map<std::pair<std::string, std::type_index>,
ceph::immobile_any<largest_singleton>,
associated_objs_cmp> associated_objs;
std::set<std::string> associated_objs_drop_on_fork;
ceph::spinlock _fork_watchers_lock;
std::vector<ForkWatcher*> _fork_watchers;
// crypto
CryptoHandler *_crypto_none;
CryptoHandler *_crypto_aes;
std::unique_ptr<CryptoRandom> _crypto_random;
// experimental
CephContextObs *_cct_obs;
ceph::spinlock _feature_lock;
std::set<std::string> _experimental_features;
ceph::PluginRegistry* _plugin_registry;
#ifdef CEPH_DEBUG_MUTEX
md_config_obs_t *_lockdep_obs;
#endif
public:
TOPNSPC::crush::CrushLocation crush_location;
private:
enum {
l_cct_first,
l_cct_total_workers,
l_cct_unhealthy_workers,
l_cct_last
};
enum {
l_mempool_first = 873222,
l_mempool_bytes,
l_mempool_items,
l_mempool_last
};
PerfCounters *_cct_perf = nullptr;
PerfCounters* _mempool_perf = nullptr;
std::vector<std::string> _mempool_perf_names, _mempool_perf_descriptions;
/**
* Enable the performance counters.
*/
void _enable_perf_counter();
/**
* Disable the performance counter.
*/
void _disable_perf_counter();
/**
* Refresh perf counter values.
*/
void _refresh_perf_values();
friend class CephContextObs;
};
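// Illustrative usage sketch (hypothetical function, not part of the original
// header): a CephContext is reference counted, so callers pair get() with
// put() rather than deleting it directly.
inline CephContext *example_make_client_context()
{
  // CEPH_ENTITY_TYPE_CLIENT comes from the messenger headers included above.
  CephContext *cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT,
                                     CODE_ENVIRONMENT_UTILITY, 0);
  return cct; // release with cct->put() when done
}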
#ifdef __cplusplus
}
#endif
#endif // WITH_SEASTAR
#if !(defined(WITH_SEASTAR) && !defined(WITH_ALIEN)) && defined(__cplusplus)
namespace ceph::common {
inline void intrusive_ptr_add_ref(CephContext* cct)
{
cct->get();
}
inline void intrusive_ptr_release(CephContext* cct)
{
cct->put();
}
}
#endif // !(defined(WITH_SEASTAR) && !defined(WITH_ALIEN)) && defined(__cplusplus)
#endif
| 10,853 | 24.538824 | 88 | h |
null | ceph-main/src/common/ceph_crypto.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#ifndef CEPH_CRYPTO_H
#define CEPH_CRYPTO_H
#include "acconfig.h"
#include <stdexcept>
#include "include/common_fwd.h"
#include "include/buffer.h"
#include "include/types.h"
#define CEPH_CRYPTO_MD5_DIGESTSIZE 16
#define CEPH_CRYPTO_HMACSHA1_DIGESTSIZE 20
#define CEPH_CRYPTO_SHA1_DIGESTSIZE 20
#define CEPH_CRYPTO_HMACSHA256_DIGESTSIZE 32
#define CEPH_CRYPTO_SHA256_DIGESTSIZE 32
#define CEPH_CRYPTO_SHA512_DIGESTSIZE 64
#include <openssl/evp.h>
#include <openssl/ossl_typ.h>
#include <openssl/hmac.h>
#include "include/ceph_assert.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
extern "C" {
const EVP_MD *EVP_md5(void);
const EVP_MD *EVP_sha1(void);
const EVP_MD *EVP_sha256(void);
const EVP_MD *EVP_sha512(void);
}
namespace TOPNSPC::crypto {
void assert_init();
void init();
void shutdown(bool shared=true);
void zeroize_for_security(void *s, size_t n);
class DigestException : public std::runtime_error
{
public:
DigestException(const char* what_arg) : runtime_error(what_arg)
{}
};
namespace ssl {
class OpenSSLDigest {
private:
EVP_MD_CTX *mpContext;
const EVP_MD *mpType;
EVP_MD *mpType_FIPS = nullptr;
public:
OpenSSLDigest (const EVP_MD *_type);
~OpenSSLDigest ();
OpenSSLDigest(OpenSSLDigest&& o) noexcept;
OpenSSLDigest& operator=(OpenSSLDigest&& o) noexcept;
void Restart();
void SetFlags(int flags);
void Update (const unsigned char *input, size_t length);
void Final (unsigned char *digest);
};
class MD5 : public OpenSSLDigest {
public:
static constexpr size_t digest_size = CEPH_CRYPTO_MD5_DIGESTSIZE;
MD5 () : OpenSSLDigest(EVP_md5()) { }
};
class SHA1 : public OpenSSLDigest {
public:
static constexpr size_t digest_size = CEPH_CRYPTO_SHA1_DIGESTSIZE;
SHA1 () : OpenSSLDigest(EVP_sha1()) { }
};
class SHA256 : public OpenSSLDigest {
public:
static constexpr size_t digest_size = CEPH_CRYPTO_SHA256_DIGESTSIZE;
SHA256 () : OpenSSLDigest(EVP_sha256()) { }
};
class SHA512 : public OpenSSLDigest {
public:
static constexpr size_t digest_size = CEPH_CRYPTO_SHA512_DIGESTSIZE;
SHA512 () : OpenSSLDigest(EVP_sha512()) { }
};
# if OPENSSL_VERSION_NUMBER < 0x10100000L
class HMAC {
private:
HMAC_CTX mContext;
const EVP_MD *mpType;
public:
HMAC (const EVP_MD *type, const unsigned char *key, size_t length)
: mpType(type) {
// the strict FIPS zeroization doesn't seem to be necessary here,
// but zero the context just in case.
::TOPNSPC::crypto::zeroize_for_security(&mContext, sizeof(mContext));
const auto r = HMAC_Init_ex(&mContext, key, length, mpType, nullptr);
if (r != 1) {
throw DigestException("HMAC_Init_ex() failed");
}
}
~HMAC () {
HMAC_CTX_cleanup(&mContext);
}
void Restart () {
const auto r = HMAC_Init_ex(&mContext, nullptr, 0, mpType, nullptr);
if (r != 1) {
throw DigestException("HMAC_Init_ex() failed");
}
}
void Update (const unsigned char *input, size_t length) {
if (length) {
const auto r = HMAC_Update(&mContext, input, length);
if (r != 1) {
throw DigestException("HMAC_Update() failed");
}
}
}
void Final (unsigned char *digest) {
unsigned int s;
const auto r = HMAC_Final(&mContext, digest, &s);
if (r != 1) {
throw DigestException("HMAC_Final() failed");
}
}
};
# else
class HMAC {
private:
HMAC_CTX *mpContext;
public:
HMAC (const EVP_MD *type, const unsigned char *key, size_t length)
: mpContext(HMAC_CTX_new()) {
const auto r = HMAC_Init_ex(mpContext, key, length, type, nullptr);
if (r != 1) {
throw DigestException("HMAC_Init_ex() failed");
}
}
~HMAC () {
HMAC_CTX_free(mpContext);
}
void Restart () {
const EVP_MD * const type = HMAC_CTX_get_md(mpContext);
const auto r = HMAC_Init_ex(mpContext, nullptr, 0, type, nullptr);
if (r != 1) {
throw DigestException("HMAC_Init_ex() failed");
}
}
void Update (const unsigned char *input, size_t length) {
if (length) {
const auto r = HMAC_Update(mpContext, input, length);
if (r != 1) {
throw DigestException("HMAC_Update() failed");
}
}
}
void Final (unsigned char *digest) {
unsigned int s;
const auto r = HMAC_Final(mpContext, digest, &s);
if (r != 1) {
throw DigestException("HMAC_Final() failed");
}
}
};
# endif // OPENSSL_VERSION_NUMBER < 0x10100000L
struct HMACSHA1 : public HMAC {
HMACSHA1 (const unsigned char *key, size_t length)
: HMAC(EVP_sha1(), key, length) {
}
};
struct HMACSHA256 : public HMAC {
HMACSHA256 (const unsigned char *key, size_t length)
: HMAC(EVP_sha256(), key, length) {
}
};
}
using ssl::SHA256;
using ssl::MD5;
using ssl::SHA1;
using ssl::SHA512;
using ssl::HMACSHA256;
using ssl::HMACSHA1;
template<class Digest>
auto digest(const ceph::buffer::list& bl)
{
unsigned char fingerprint[Digest::digest_size];
Digest gen;
for (auto& p : bl.buffers()) {
gen.Update((const unsigned char *)p.c_str(), p.length());
}
gen.Final(fingerprint);
return sha_digest_t<Digest::digest_size>{fingerprint};
}
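// Illustrative sketch (hypothetical helper, not part of the original header):
// hashing a bufferlist in one call via the digest<> template above.
inline auto example_sha256(const ceph::buffer::list& bl)
{
  return digest<SHA256>(bl); // yields a sha_digest_t of SHA256::digest_size bytes
}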
}
#pragma clang diagnostic pop
#pragma GCC diagnostic pop
#endif
| 5,574 | 24.573394 | 76 | h |
null | ceph-main/src/common/ceph_json.h | #ifndef CEPH_JSON_H
#define CEPH_JSON_H
#include <stdexcept>
#include <typeindex>
#include <include/types.h>
#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>
#include <include/ceph_fs.h>
#include "common/ceph_time.h"
#include "json_spirit/json_spirit.h"
#include "Formatter.h"
class JSONObj;
class JSONObjIter {
typedef std::map<std::string, JSONObj *>::iterator map_iter_t;
map_iter_t cur;
map_iter_t last;
public:
JSONObjIter();
~JSONObjIter();
void set(const JSONObjIter::map_iter_t &_cur, const JSONObjIter::map_iter_t &_end);
void operator++();
JSONObj *operator*();
bool end() const {
return (cur == last);
}
};
class JSONObj
{
JSONObj *parent;
public:
struct data_val {
std::string str;
bool quoted{false};
void set(std::string_view s, bool q) {
str = s;
quoted = q;
}
};
protected:
std::string name; // corresponds to obj_type in XMLObj
json_spirit::Value data;
struct data_val val;
bool data_quoted{false};
std::multimap<std::string, JSONObj *> children;
std::map<std::string, data_val> attr_map;
void handle_value(json_spirit::Value v);
public:
JSONObj() : parent(NULL){}
virtual ~JSONObj();
void init(JSONObj *p, json_spirit::Value v, std::string n);
std::string& get_name() { return name; }
data_val& get_data_val() { return val; }
const std::string& get_data() { return val.str; }
bool get_data(const std::string& key, data_val *dest);
JSONObj *get_parent();
void add_child(std::string el, JSONObj *child);
bool get_attr(std::string name, data_val& attr);
JSONObjIter find(const std::string& name);
JSONObjIter find_first();
JSONObjIter find_first(const std::string& name);
JSONObj *find_obj(const std::string& name);
friend std::ostream& operator<<(std::ostream &out,
const JSONObj &obj); // does not work, FIXME
bool is_array();
bool is_object();
std::vector<std::string> get_array_elements();
};
inline std::ostream& operator<<(std::ostream &out, const JSONObj::data_val& dv) {
const char *q = (dv.quoted ? "\"" : "");
out << q << dv.str << q;
return out;
}
class JSONParser : public JSONObj
{
int buf_len;
std::string json_buffer;
bool success;
public:
JSONParser();
~JSONParser() override;
void handle_data(const char *s, int len);
bool parse(const char *buf_, int len);
bool parse(int len);
bool parse();
bool parse(const char *file_name);
const char *get_json() { return json_buffer.c_str(); }
void set_failure() { success = false; }
};
void encode_json(const char *name, const JSONObj::data_val& v, ceph::Formatter *f);
class JSONDecoder {
public:
struct err : std::runtime_error {
using runtime_error::runtime_error;
};
JSONParser parser;
JSONDecoder(ceph::buffer::list& bl) {
if (!parser.parse(bl.c_str(), bl.length())) {
std::cout << "JSONDecoder::err()" << std::endl;
throw JSONDecoder::err("failed to parse JSON input");
}
}
template<class T>
static bool decode_json(const char *name, T& val, JSONObj *obj, bool mandatory = false);
template<class C>
static bool decode_json(const char *name, C& container, void (*cb)(C&, JSONObj *obj), JSONObj *obj, bool mandatory = false);
template<class T>
static void decode_json(const char *name, T& val, const T& default_val, JSONObj *obj);
template<class T>
static bool decode_json(const char *name, boost::optional<T>& val, JSONObj *obj, bool mandatory = false);
template<class T>
static bool decode_json(const char *name, std::optional<T>& val, JSONObj *obj, bool mandatory = false);
};
template<class T>
void decode_json_obj(T& val, JSONObj *obj)
{
val.decode_json(obj);
}
inline void decode_json_obj(std::string& val, JSONObj *obj)
{
val = obj->get_data();
}
static inline void decode_json_obj(JSONObj::data_val& val, JSONObj *obj)
{
val = obj->get_data_val();
}
void decode_json_obj(unsigned long long& val, JSONObj *obj);
void decode_json_obj(long long& val, JSONObj *obj);
void decode_json_obj(unsigned long& val, JSONObj *obj);
void decode_json_obj(long& val, JSONObj *obj);
void decode_json_obj(unsigned& val, JSONObj *obj);
void decode_json_obj(int& val, JSONObj *obj);
void decode_json_obj(bool& val, JSONObj *obj);
void decode_json_obj(ceph::buffer::list& val, JSONObj *obj);
class utime_t;
void decode_json_obj(utime_t& val, JSONObj *obj);
void decode_json_obj(ceph_dir_layout& i, JSONObj *obj);
void decode_json_obj(ceph::real_time& val, JSONObj *obj);
void decode_json_obj(ceph::coarse_real_time& val, JSONObj *obj);
template<class T>
void decode_json_obj(std::list<T>& l, JSONObj *obj)
{
l.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
T val;
JSONObj *o = *iter;
decode_json_obj(val, o);
l.push_back(val);
}
}
template<class T>
void decode_json_obj(std::deque<T>& l, JSONObj *obj)
{
l.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
T val;
JSONObj *o = *iter;
decode_json_obj(val, o);
l.push_back(val);
}
}
template<class T>
void decode_json_obj(std::set<T>& l, JSONObj *obj)
{
l.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
T val;
JSONObj *o = *iter;
decode_json_obj(val, o);
l.insert(val);
}
}
template<class T, class Compare, class Alloc>
void decode_json_obj(boost::container::flat_set<T, Compare, Alloc>& l, JSONObj *obj)
{
l.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
T val;
JSONObj *o = *iter;
decode_json_obj(val, o);
l.insert(val);
}
}
template<class T>
void decode_json_obj(std::vector<T>& l, JSONObj *obj)
{
l.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
T val;
JSONObj *o = *iter;
decode_json_obj(val, o);
l.push_back(val);
}
}
template<class K, class V, class C = std::less<K> >
void decode_json_obj(std::map<K, V, C>& m, JSONObj *obj)
{
m.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
K key;
V val;
JSONObj *o = *iter;
JSONDecoder::decode_json("key", key, o);
JSONDecoder::decode_json("val", val, o);
m[key] = val;
}
}
template<class K, class V, class C = std::less<K> >
void decode_json_obj(boost::container::flat_map<K, V, C>& m, JSONObj *obj)
{
m.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
K key;
V val;
JSONObj *o = *iter;
JSONDecoder::decode_json("key", key, o);
JSONDecoder::decode_json("val", val, o);
m[key] = val;
}
}
template<class K, class V>
void decode_json_obj(std::multimap<K, V>& m, JSONObj *obj)
{
m.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
K key;
V val;
JSONObj *o = *iter;
JSONDecoder::decode_json("key", key, o);
JSONDecoder::decode_json("val", val, o);
m.insert(make_pair(key, val));
}
}
template<class K, class V>
void decode_json_obj(boost::container::flat_map<K, V>& m, JSONObj *obj)
{
m.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
K key;
V val;
JSONObj *o = *iter;
JSONDecoder::decode_json("key", key, o);
JSONDecoder::decode_json("val", val, o);
m[key] = val;
}
}
template<class C>
void decode_json_obj(C& container, void (*cb)(C&, JSONObj *obj), JSONObj *obj)
{
container.clear();
JSONObjIter iter = obj->find_first();
for (; !iter.end(); ++iter) {
JSONObj *o = *iter;
cb(container, o);
}
}
template<class T>
bool JSONDecoder::decode_json(const char *name, T& val, JSONObj *obj, bool mandatory)
{
JSONObjIter iter = obj->find_first(name);
if (iter.end()) {
if (mandatory) {
std::string s = "missing mandatory field " + std::string(name);
throw err(s);
}
if constexpr (std::is_default_constructible_v<T>) {
val = T();
}
return false;
}
try {
decode_json_obj(val, *iter);
} catch (const err& e) {
std::string s = std::string(name) + ": ";
s.append(e.what());
throw err(s);
}
return true;
}
template<class C>
bool JSONDecoder::decode_json(const char *name, C& container, void (*cb)(C&, JSONObj *), JSONObj *obj, bool mandatory)
{
container.clear();
JSONObjIter iter = obj->find_first(name);
if (iter.end()) {
if (mandatory) {
std::string s = "missing mandatory field " + std::string(name);
throw err(s);
}
return false;
}
try {
decode_json_obj(container, cb, *iter);
} catch (const err& e) {
std::string s = std::string(name) + ": ";
s.append(e.what());
throw err(s);
}
return true;
}
template<class T>
void JSONDecoder::decode_json(const char *name, T& val, const T& default_val, JSONObj *obj)
{
JSONObjIter iter = obj->find_first(name);
if (iter.end()) {
val = default_val;
return;
}
try {
decode_json_obj(val, *iter);
} catch (const err& e) {
val = default_val;
std::string s = std::string(name) + ": ";
s.append(e.what());
throw err(s);
}
}
template<class T>
bool JSONDecoder::decode_json(const char *name, boost::optional<T>& val, JSONObj *obj, bool mandatory)
{
JSONObjIter iter = obj->find_first(name);
if (iter.end()) {
if (mandatory) {
std::string s = "missing mandatory field " + std::string(name);
throw err(s);
}
val = boost::none;
return false;
}
try {
val.reset(T());
decode_json_obj(val.get(), *iter);
} catch (const err& e) {
val.reset();
std::string s = std::string(name) + ": ";
s.append(e.what());
throw err(s);
}
return true;
}
template<class T>
bool JSONDecoder::decode_json(const char *name, std::optional<T>& val, JSONObj *obj, bool mandatory)
{
JSONObjIter iter = obj->find_first(name);
if (iter.end()) {
if (mandatory) {
std::string s = "missing mandatory field " + std::string(name);
throw err(s);
}
val.reset();
return false;
}
try {
val.emplace();
decode_json_obj(*val, *iter);
} catch (const err& e) {
val.reset();
std::string s = std::string(name) + ": ";
s.append(e.what());
throw err(s);
}
return true;
}
class JSONEncodeFilter
{
public:
class HandlerBase {
public:
virtual ~HandlerBase() {}
virtual std::type_index get_type() = 0;
virtual void encode_json(const char *name, const void *pval, ceph::Formatter *) const = 0;
};
template <class T>
class Handler : public HandlerBase {
public:
virtual ~Handler() {}
std::type_index get_type() override {
return std::type_index(typeid(const T&));
}
};
private:
std::map<std::type_index, HandlerBase *> handlers;
public:
void register_type(HandlerBase *h) {
handlers[h->get_type()] = h;
}
template <class T>
bool encode_json(const char *name, const T& val, ceph::Formatter *f) {
auto iter = handlers.find(std::type_index(typeid(val)));
if (iter == handlers.end()) {
return false;
}
iter->second->encode_json(name, (const void *)&val, f);
return true;
}
};
template<class T>
static void encode_json_impl(const char *name, const T& val, ceph::Formatter *f)
{
f->open_object_section(name);
val.dump(f);
f->close_section();
}
template<class T>
static void encode_json(const char *name, const T& val, ceph::Formatter *f)
{
JSONEncodeFilter *filter = static_cast<JSONEncodeFilter *>(f->get_external_feature_handler("JSONEncodeFilter"));
if (!filter ||
!filter->encode_json(name, val, f)) {
encode_json_impl(name, val, f);
}
}
class utime_t;
void encode_json(const char *name, std::string_view val, ceph::Formatter *f);
void encode_json(const char *name, const std::string& val, ceph::Formatter *f);
void encode_json(const char *name, const char *val, ceph::Formatter *f);
void encode_json(const char *name, bool val, ceph::Formatter *f);
void encode_json(const char *name, int val, ceph::Formatter *f);
void encode_json(const char *name, unsigned val, ceph::Formatter *f);
void encode_json(const char *name, long val, ceph::Formatter *f);
void encode_json(const char *name, unsigned long val, ceph::Formatter *f);
void encode_json(const char *name, long long val, ceph::Formatter *f);
void encode_json(const char *name, const utime_t& val, ceph::Formatter *f);
void encode_json(const char *name, const ceph::buffer::list& bl, ceph::Formatter *f);
void encode_json(const char *name, long long unsigned val, ceph::Formatter *f);
void encode_json(const char *name, const ceph::real_time& val, ceph::Formatter *f);
void encode_json(const char *name, const ceph::coarse_real_time& val, ceph::Formatter *f);
template<class T>
static void encode_json(const char *name, const std::list<T>& l, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = l.cbegin(); iter != l.cend(); ++iter) {
encode_json("obj", *iter, f);
}
f->close_section();
}
template<class T>
static void encode_json(const char *name, const std::deque<T>& l, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = l.cbegin(); iter != l.cend(); ++iter) {
encode_json("obj", *iter, f);
}
f->close_section();
}
template<class T, class Compare = std::less<T> >
static void encode_json(const char *name, const std::set<T, Compare>& l, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = l.cbegin(); iter != l.cend(); ++iter) {
encode_json("obj", *iter, f);
}
f->close_section();
}
template<class T, class Compare, class Alloc>
static void encode_json(const char *name,
const boost::container::flat_set<T, Compare, Alloc>& l,
ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = l.cbegin(); iter != l.cend(); ++iter) {
encode_json("obj", *iter, f);
}
f->close_section();
}
template<class T>
static void encode_json(const char *name, const std::vector<T>& l, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = l.cbegin(); iter != l.cend(); ++iter) {
encode_json("obj", *iter, f);
}
f->close_section();
}
template<class K, class V, class C = std::less<K>>
static void encode_json(const char *name, const std::map<K, V, C>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto i = m.cbegin(); i != m.cend(); ++i) {
f->open_object_section("entry");
encode_json("key", i->first, f);
encode_json("val", i->second, f);
f->close_section();
}
f->close_section();
}
template<class K, class V, class C = std::less<K> >
static void encode_json(const char *name, const boost::container::flat_map<K, V, C>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto i = m.cbegin(); i != m.cend(); ++i) {
f->open_object_section("entry");
encode_json("key", i->first, f);
encode_json("val", i->second, f);
f->close_section();
}
f->close_section();
}
template<class K, class V>
static void encode_json(const char *name, const std::multimap<K, V>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto i = m.begin(); i != m.end(); ++i) {
f->open_object_section("entry");
encode_json("key", i->first, f);
encode_json("val", i->second, f);
f->close_section();
}
f->close_section();
}
template<class K, class V>
static void encode_json(const char *name, const boost::container::flat_map<K, V>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto i = m.begin(); i != m.end(); ++i) {
f->open_object_section("entry");
encode_json("key", i->first, f);
encode_json("val", i->second, f);
f->close_section();
}
f->close_section();
}
template<class K, class V>
void encode_json_map(const char *name, const std::map<K, V>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = m.cbegin(); iter != m.cend(); ++iter) {
encode_json("obj", iter->second, f);
}
f->close_section();
}
template<class K, class V>
void encode_json_map(const char *name, const char *index_name,
const char *object_name, const char *value_name,
void (*cb)(const char *, const V&, ceph::Formatter *, void *), void *parent,
const std::map<K, V>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = m.cbegin(); iter != m.cend(); ++iter) {
if (index_name) {
f->open_object_section("key_value");
f->dump_string(index_name, iter->first);
}
if (object_name) {
f->open_object_section(object_name);
}
if (cb) {
cb(value_name, iter->second, f, parent);
} else {
encode_json(value_name, iter->second, f);
}
if (object_name) {
f->close_section();
}
if (index_name) {
f->close_section();
}
}
f->close_section();
}
template<class K, class V>
void encode_json_map(const char *name, const char *index_name,
const char *object_name, const char *value_name,
const std::map<K, V>& m, ceph::Formatter *f)
{
encode_json_map<K, V>(name, index_name, object_name, value_name, NULL, NULL, m, f);
}
template<class K, class V>
void encode_json_map(const char *name, const char *index_name, const char *value_name,
const std::map<K, V>& m, ceph::Formatter *f)
{
encode_json_map<K, V>(name, index_name, NULL, value_name, NULL, NULL, m, f);
}
template <class T>
static void encode_json(const char *name, const std::optional<T>& o, ceph::Formatter *f)
{
if (!o) {
return;
}
encode_json(name, *o, f);
}
template<class K, class V>
void encode_json_map(const char *name, const boost::container::flat_map<K, V>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = m.cbegin(); iter != m.cend(); ++iter) {
encode_json("obj", iter->second, f);
}
f->close_section();
}
template<class K, class V>
void encode_json_map(const char *name, const char *index_name,
const char *object_name, const char *value_name,
void (*cb)(const char *, const V&, ceph::Formatter *, void *), void *parent,
const boost::container::flat_map<K, V>& m, ceph::Formatter *f)
{
f->open_array_section(name);
for (auto iter = m.cbegin(); iter != m.cend(); ++iter) {
if (index_name) {
f->open_object_section("key_value");
f->dump_string(index_name, iter->first);
}
if (object_name) {
f->open_object_section(object_name);
}
if (cb) {
cb(value_name, iter->second, f, parent);
} else {
encode_json(value_name, iter->second, f);
}
if (object_name) {
f->close_section();
}
if (index_name) {
f->close_section();
}
}
f->close_section();
}
template<class K, class V>
void encode_json_map(const char *name, const char *index_name,
const char *object_name, const char *value_name,
const boost::container::flat_map<K, V>& m, ceph::Formatter *f)
{
encode_json_map<K, V>(name, index_name, object_name, value_name, NULL, NULL, m, f);
}
template<class K, class V>
void encode_json_map(const char *name, const char *index_name, const char *value_name,
const boost::container::flat_map<K, V>& m, ceph::Formatter *f)
{
encode_json_map<K, V>(name, index_name, NULL, value_name, NULL, NULL, m, f);
}
class JSONFormattable : public ceph::JSONFormatter {
JSONObj::data_val value;
std::vector<JSONFormattable> arr;
std::map<std::string, JSONFormattable> obj;
std::vector<JSONFormattable *> enc_stack;
JSONFormattable *cur_enc;
protected:
bool handle_value(std::string_view name, std::string_view s, bool quoted) override;
bool handle_open_section(std::string_view name, const char *ns, bool section_is_array) override;
bool handle_close_section() override;
public:
JSONFormattable(bool p = false) : JSONFormatter(p) {
cur_enc = this;
enc_stack.push_back(cur_enc);
}
enum Type {
FMT_NONE,
FMT_VALUE,
FMT_ARRAY,
FMT_OBJ,
} type{FMT_NONE};
void set_type(Type t) {
type = t;
}
void decode_json(JSONObj *jo) {
if (jo->is_array()) {
set_type(JSONFormattable::FMT_ARRAY);
decode_json_obj(arr, jo);
} else if (jo->is_object()) {
set_type(JSONFormattable::FMT_OBJ);
auto iter = jo->find_first();
for (;!iter.end(); ++iter) {
JSONObj *field = *iter;
decode_json_obj(obj[field->get_name()], field);
}
} else {
set_type(JSONFormattable::FMT_VALUE);
decode_json_obj(value, jo);
}
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(2, 1, bl);
encode((uint8_t)type, bl);
encode(value.str, bl);
encode(arr, bl);
encode(obj, bl);
encode(value.quoted, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(2, bl);
uint8_t t;
decode(t, bl);
type = (Type)t;
decode(value.str, bl);
decode(arr, bl);
decode(obj, bl);
if (struct_v >= 2) {
decode(value.quoted, bl);
} else {
value.quoted = true;
}
DECODE_FINISH(bl);
}
const std::string& val() const {
return value.str;
}
int val_int() const;
long val_long() const;
long long val_long_long() const;
bool val_bool() const;
const std::map<std::string, JSONFormattable> object() const {
return obj;
}
const std::vector<JSONFormattable>& array() const {
return arr;
}
const JSONFormattable& operator[](const std::string& name) const;
const JSONFormattable& operator[](size_t index) const;
JSONFormattable& operator[](const std::string& name);
JSONFormattable& operator[](size_t index);
operator std::string() const {
return value.str;
}
explicit operator int() const {
return val_int();
}
explicit operator long() const {
return val_long();
}
explicit operator long long() const {
return val_long_long();
}
explicit operator bool() const {
return val_bool();
}
template<class T>
T operator[](const std::string& name) const {
return this->operator[](name)(T());
}
template<class T>
T operator[](const std::string& name) {
return this->operator[](name)(T());
}
std::string operator ()(const char *def_val) const {
return def(std::string(def_val));
}
int operator()(int def_val) const {
return def(def_val);
}
bool operator()(bool def_val) const {
return def(def_val);
}
bool exists(const std::string& name) const;
bool exists(size_t index) const;
std::string def(const std::string& def_val) const;
int def(int def_val) const;
bool def(bool def_val) const;
bool find(const std::string& name, std::string *val) const;
std::string get(const std::string& name, const std::string& def_val) const;
int get_int(const std::string& name, int def_val) const;
bool get_bool(const std::string& name, bool def_val) const;
int set(const std::string& name, const std::string& val);
int erase(const std::string& name);
void derive_from(const JSONFormattable& jf);
void encode_json(const char *name, ceph::Formatter *f) const;
bool is_array() const {
return (type == FMT_ARRAY);
}
};
WRITE_CLASS_ENCODER(JSONFormattable)
void encode_json(const char *name, const JSONFormattable& v, ceph::Formatter *f);
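// Illustrative sketch (hypothetical struct, not part of the original header):
// a type opts in to these helpers by providing dump(Formatter*) for encoding
// and decode_json(JSONObj*) for decoding; the templates above do the rest,
// e.g. encode_json("entry", e, f) or decode_json_obj(e, parsed_obj).
struct example_json_entry {
  std::string name;
  int count = 0;
  void dump(ceph::Formatter *f) const {
    encode_json("name", name, f);
    encode_json("count", count, f);
  }
  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("name", name, obj);
    JSONDecoder::decode_json("count", count, obj);
  }
};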
#endif
| 23,382 | 24.035332 | 126 | h |
null | ceph-main/src/common/ceph_mutex.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <utility>
#include "common/containers.h"
// What and why
// ============
//
// For general code making use of mutexes, use these ceph:: types.
// The key requirement is that you make use of the ceph::make_mutex()
// and make_recursive_mutex() factory methods, which take a string
// naming the mutex for the purposes of the lockdep debug variant.
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#include <seastar/core/condition-variable.hh>
#include "crimson/common/log.h"
#include "include/ceph_assert.h"
#ifndef NDEBUG
#define FUT_DEBUG(FMT_MSG, ...) crimson::get_logger(ceph_subsys_).trace(FMT_MSG, ##__VA_ARGS__)
#else
#define FUT_DEBUG(FMT_MSG, ...)
#endif
namespace ceph {
// an empty class satisfying the mutex concept
struct dummy_mutex {
void lock() {}
bool try_lock() {
return true;
}
void unlock() {}
void lock_shared() {}
void unlock_shared() {}
};
struct dummy_shared_mutex : dummy_mutex {
void lock_shared() {}
void unlock_shared() {}
};
// this implementation assumes running within a seastar::thread
struct green_condition_variable : private seastar::condition_variable {
template <class LockT>
void wait(LockT&&) {
FUT_DEBUG("green_condition_variable::{}: before blocking", __func__);
seastar::condition_variable::wait().get();
FUT_DEBUG("green_condition_variable::{}: after blocking", __func__);
}
void notify_one() noexcept {
FUT_DEBUG("green_condition_variable::{}", __func__);
signal();
}
void notify_all() noexcept {
FUT_DEBUG("green_condition_variable::{}", __func__);
broadcast();
}
};
using mutex = dummy_mutex;
using recursive_mutex = dummy_mutex;
using shared_mutex = dummy_shared_mutex;
using condition_variable = green_condition_variable;
template <typename ...Args>
dummy_mutex make_mutex(Args&& ...args) {
return {};
}
template <typename ...Args>
recursive_mutex make_recursive_mutex(Args&& ...args) {
return {};
}
template <typename ...Args>
shared_mutex make_shared_mutex(Args&& ...args) {
return {};
}
#define ceph_mutex_is_locked(m) true
#define ceph_mutex_is_locked_by_me(m) true
}
#else // defined (WITH_SEASTAR) && !defined(WITH_ALIEN)
//
// For legacy Mutex users that passed recursive=true, use
// ceph::make_recursive_mutex. For legacy Mutex users that passed
// lockdep=false, use std::mutex directly.
#ifdef CEPH_DEBUG_MUTEX
// ============================================================================
// debug (lockdep-capable, various sanity checks and asserts)
// ============================================================================
//
// Note: this is known to cause deadlocks on Windows because
// of the winpthreads shared mutex implementation.
#include "common/condition_variable_debug.h"
#include "common/mutex_debug.h"
#include "common/shared_mutex_debug.h"
namespace ceph {
typedef ceph::mutex_debug mutex;
typedef ceph::mutex_recursive_debug recursive_mutex;
typedef ceph::condition_variable_debug condition_variable;
typedef ceph::shared_mutex_debug shared_mutex;
// pass arguments to mutex_debug ctor
template <typename ...Args>
mutex make_mutex(Args&& ...args) {
return {std::forward<Args>(args)...};
}
// pass arguments to recursive_mutex_debug ctor
template <typename ...Args>
recursive_mutex make_recursive_mutex(Args&& ...args) {
return {std::forward<Args>(args)...};
}
// pass arguments to shared_mutex_debug ctor
template <typename ...Args>
shared_mutex make_shared_mutex(Args&& ...args) {
return {std::forward<Args>(args)...};
}
// debug methods
#define ceph_mutex_is_locked(m) ((m).is_locked())
#define ceph_mutex_is_not_locked(m) (!(m).is_locked())
#define ceph_mutex_is_rlocked(m) ((m).is_rlocked())
#define ceph_mutex_is_wlocked(m) ((m).is_wlocked())
#define ceph_mutex_is_locked_by_me(m) ((m).is_locked_by_me())
#define ceph_mutex_is_not_locked_by_me(m) (!(m).is_locked_by_me())
}
#else
// ============================================================================
// release (fast and minimal)
// ============================================================================
#include <condition_variable>
#include <mutex>
// The winpthreads shared mutex implementation is broken.
// We'll use boost::shared_mutex instead.
// https://github.com/msys2/MINGW-packages/issues/3319
#if __MINGW32__
#include <boost/thread/shared_mutex.hpp>
#else
#include <shared_mutex>
#endif
namespace ceph {
typedef std::mutex mutex;
typedef std::recursive_mutex recursive_mutex;
typedef std::condition_variable condition_variable;
#if __MINGW32__
typedef boost::shared_mutex shared_mutex;
#else
typedef std::shared_mutex shared_mutex;
#endif
// discard arguments to make_mutex (they are for debugging only)
template <typename ...Args>
mutex make_mutex(Args&& ...args) {
return {};
}
template <typename ...Args>
recursive_mutex make_recursive_mutex(Args&& ...args) {
return {};
}
template <typename ...Args>
shared_mutex make_shared_mutex(Args&& ...args) {
return {};
}
// debug methods. Note that these can blindly return true
// because any code that does anything other than assert these
// are true is broken.
#define ceph_mutex_is_locked(m) true
#define ceph_mutex_is_not_locked(m) true
#define ceph_mutex_is_rlocked(m) true
#define ceph_mutex_is_wlocked(m) true
#define ceph_mutex_is_locked_by_me(m) true
#define ceph_mutex_is_not_locked_by_me(m) true
}
#endif // CEPH_DEBUG_MUTEX
#endif // WITH_SEASTAR
namespace ceph {
template <class LockT,
class LockFactoryT>
ceph::containers::tiny_vector<LockT> make_lock_container(
const std::size_t num_instances,
LockFactoryT&& lock_factory)
{
return {
num_instances, [&](const std::size_t i, auto emplacer) {
// this will be called `num_instances` times
new (emplacer.data()) LockT {lock_factory(i)};
}
};
}
} // namespace ceph
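// Illustrative usage sketch (the class below is hypothetical, not part of the
// original header). Locks are created through the factory functions above so
// the lockdep-enabled build records a name, then used with the standard
// <mutex> RAII helpers (pulled in by the non-crimson branches above).
class example_work_queue {
  ceph::mutex lock = ceph::make_mutex("example_work_queue::lock");
  ceph::condition_variable cond;
  int pending = 0;
public:
  void push() {
    std::lock_guard l(lock);
    ++pending;
    cond.notify_one();
  }
  void pop() {
    std::unique_lock l(lock);
    while (pending == 0) {
      cond.wait(l); // no predicate overload in the crimson variant
    }
    --pending;
  }
};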
| 6,126 | 26.977169 | 95 | h |
null | ceph-main/src/common/ceph_releases.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
#include <iosfwd>
#include <string_view>
#include "common/ceph_strings.h"
// the C++ version of CEPH_RELEASE_* defined by include/rados.h
enum class ceph_release_t : std::uint8_t {
unknown = 0,
argonaut,
bobtail,
cuttlefish,
dumpling,
emperor,
firefly,
giant,
hammer,
infernalis,
jewel,
kraken,
luminous,
mimic,
nautilus,
octopus,
pacific,
quincy,
reef,
max,
};
std::ostream& operator<<(std::ostream& os, const ceph_release_t r);
inline bool operator!(ceph_release_t& r) {
return (r < ceph_release_t::unknown ||
r == ceph_release_t::unknown);
}
inline ceph_release_t& operator--(ceph_release_t& r) {
r = static_cast<ceph_release_t>(static_cast<uint8_t>(r) - 1);
return r;
}
inline ceph_release_t& operator++(ceph_release_t& r) {
r = static_cast<ceph_release_t>(static_cast<uint8_t>(r) + 1);
return r;
}
inline bool operator<(ceph_release_t lhs, ceph_release_t rhs) {
// we used to use -1 for invalid release
if (static_cast<int8_t>(lhs) < 0) {
return true;
} else if (static_cast<int8_t>(rhs) < 0) {
return false;
}
return static_cast<uint8_t>(lhs) < static_cast<uint8_t>(rhs);
}
inline bool operator>(ceph_release_t lhs, ceph_release_t rhs) {
// we used to use -1 for invalid release
if (static_cast<int8_t>(lhs) < 0) {
return false;
} else if (static_cast<int8_t>(rhs) < 0) {
return true;
}
return static_cast<uint8_t>(lhs) > static_cast<uint8_t>(rhs);
}
inline bool operator>=(ceph_release_t lhs, ceph_release_t rhs) {
return !(lhs < rhs);
}
bool can_upgrade_from(ceph_release_t from_release,
std::string_view from_release_name,
std::ostream& err);
ceph_release_t ceph_release_from_name(std::string_view sv);
ceph_release_t ceph_release();
inline std::string_view to_string(ceph_release_t r) {
return ceph_release_name(static_cast<int>(r));
}
template<typename IntType> IntType to_integer(ceph_release_t r) {
return static_cast<IntType>(r);
}
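// Illustrative sketch (hypothetical helper, not part of the original header):
// the comparison operators above order unknown/legacy-negative encodings
// before every named release, so a plain >= check gates newer behaviour.
inline bool example_requires_at_least(ceph_release_t have, ceph_release_t need)
{
  return have >= need; // e.g. example_requires_at_least(r, ceph_release_t::quincy)
}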
| 2,108 | 22.433333 | 70 | h |
null | ceph-main/src/common/ceph_strings.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
const char *ceph_entity_type_name(int type);
const char *ceph_con_mode_name(int con_mode);
const char *ceph_osd_op_name(int op);
const char *ceph_osd_state_name(int s);
const char *ceph_release_name(int r);
std::uint64_t ceph_release_features(int r);
int ceph_release_from_features(std::uint64_t features);
const char *ceph_osd_watch_op_name(int o);
const char *ceph_osd_alloc_hint_flag_name(int f);
const char *ceph_mds_state_name(int s);
const char *ceph_session_op_name(int op);
const char *ceph_mds_op_name(int op);
const char *ceph_cap_op_name(int op);
const char *ceph_lease_op_name(int o);
const char *ceph_snap_op_name(int o);
const char *ceph_watch_event_name(int e);
const char *ceph_pool_op_name(int op);
const char *ceph_osd_backoff_op_name(int op);
| 895 | 33.461538 | 70 | h |
null | ceph-main/src/common/ceph_time.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef COMMON_CEPH_TIME_H
#define COMMON_CEPH_TIME_H
#include <chrono>
#include <iostream>
#include <string>
#include <optional>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
#include <sys/time.h>
#if defined(__APPLE__)
#include <sys/_types/_timespec.h>
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
int clock_gettime(int clk_id, struct timespec *tp);
#endif
#ifdef _WIN32
// Clock precision:
// mingw < 8.0.1:
// * CLOCK_REALTIME: ~10-55ms (GetSystemTimeAsFileTime)
// mingw >= 8.0.1:
// * CLOCK_REALTIME: <1us (GetSystemTimePreciseAsFileTime)
// * CLOCK_REALTIME_COARSE: ~10-55ms (GetSystemTimeAsFileTime)
//
// * CLOCK_MONOTONIC: <1us if TSC is usable, ~10-55ms otherwise
// (QueryPerformanceCounter)
// https://github.com/mirror/mingw-w64/commit/dcd990ed423381cf35702df9495d44f1979ebe50
#ifndef CLOCK_REALTIME_COARSE
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
#endif
#ifndef CLOCK_MONOTONIC_COARSE
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#endif
#endif
struct ceph_timespec;
namespace ceph {
// Currently we use a 64-bit count of nanoseconds.
// We could, if we wished, use a struct holding a uint64_t count
// of seconds and a uint32_t count of nanoseconds.
// At least this way we can change it to something else if we
// want.
typedef uint64_t rep;
// duration is the concrete time representation for our code in the
// case that we are only interested in durations between now and the
// future. Using it means we don't have to have EVERY function that
// deals with a duration be a template. We can do so for user-facing
// APIs, however.
typedef std::chrono::duration<rep, std::nano> timespan;
// Like the above but signed.
typedef int64_t signed_rep;
// Similar to the above but for durations that can specify
// differences between now and a time point in the past.
typedef std::chrono::duration<signed_rep, std::nano> signedspan;
template<typename Duration>
struct timeval to_timeval(Duration d) {
struct timeval tv;
auto sec = std::chrono::duration_cast<std::chrono::seconds>(d);
tv.tv_sec = sec.count();
auto usec = std::chrono::duration_cast<std::chrono::microseconds>(d-sec);
tv.tv_usec = usec.count();
return tv;
}
// We define our own clocks so we can have our choice of all time
// sources supported by the operating system. With the standard
// library the resolution and cost are unspecified. (For example,
// the libc++ system_clock class gives only microsecond
// resolution.)
// One potential issue is that we should accept system_clock
// timepoints in user-facing APIs alongside (or instead of)
// ceph::real_clock times.
// High-resolution real-time clock
class real_clock {
public:
typedef timespan duration;
typedef duration::rep rep;
typedef duration::period period;
// The second template parameter defaults to the clock's duration
// type.
typedef std::chrono::time_point<real_clock> time_point;
static constexpr const bool is_steady = false;
static time_point now() noexcept {
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return from_timespec(ts);
}
static bool is_zero(const time_point& t) {
return (t == time_point::min());
}
static time_point zero() {
return time_point::min();
}
// Allow conversion to/from any clock with the same interface as
// std::chrono::system_clock)
template<typename Clock, typename Duration>
static time_point to_system_time_point(
const std::chrono::time_point<Clock, Duration>& t) {
return time_point(seconds(Clock::to_time_t(t)) +
std::chrono::duration_cast<duration>(t.time_since_epoch() %
std::chrono::seconds(1)));
}
template<typename Clock, typename Duration>
static std::chrono::time_point<Clock, Duration> to_system_time_point(
const time_point& t) {
return (Clock::from_time_t(to_time_t(t)) +
std::chrono::duration_cast<Duration>(t.time_since_epoch() %
std::chrono::seconds(1)));
}
static time_t to_time_t(const time_point& t) noexcept {
return std::chrono::duration_cast<std::chrono::seconds>(t.time_since_epoch()).count();
}
static time_point from_time_t(const time_t& t) noexcept {
return time_point(std::chrono::seconds(t));
}
static void to_timespec(const time_point& t, struct timespec& ts) {
ts.tv_sec = to_time_t(t);
ts.tv_nsec = (t.time_since_epoch() % std::chrono::seconds(1)).count();
}
static struct timespec to_timespec(const time_point& t) {
struct timespec ts;
to_timespec(t, ts);
return ts;
}
static time_point from_timespec(const struct timespec& ts) {
return time_point(std::chrono::seconds(ts.tv_sec) +
std::chrono::nanoseconds(ts.tv_nsec));
}
static void to_ceph_timespec(const time_point& t,
struct ceph_timespec& ts);
static struct ceph_timespec to_ceph_timespec(const time_point& t);
static time_point from_ceph_timespec(const struct ceph_timespec& ts);
static void to_timeval(const time_point& t, struct timeval& tv) {
tv.tv_sec = to_time_t(t);
tv.tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(
t.time_since_epoch() % std::chrono::seconds(1)).count();
}
static struct timeval to_timeval(const time_point& t) {
struct timeval tv;
to_timeval(t, tv);
return tv;
}
static time_point from_timeval(const struct timeval& tv) {
return time_point(std::chrono::seconds(tv.tv_sec) +
std::chrono::microseconds(tv.tv_usec));
}
static double to_double(const time_point& t) {
return std::chrono::duration<double>(t.time_since_epoch()).count();
}
static time_point from_double(const double d) {
return time_point(std::chrono::duration_cast<duration>(
std::chrono::duration<double>(d)));
}
};
// Low-resolution but presumably faster real-time clock
class coarse_real_clock {
public:
typedef timespan duration;
typedef duration::rep rep;
typedef duration::period period;
// The second template parameter defaults to the clock's duration
// type.
typedef std::chrono::time_point<coarse_real_clock> time_point;
static constexpr const bool is_steady = false;
static time_point now() noexcept {
struct timespec ts;
#if defined(CLOCK_REALTIME_COARSE)
// Linux systems have _COARSE clocks.
clock_gettime(CLOCK_REALTIME_COARSE, &ts);
#elif defined(CLOCK_REALTIME_FAST)
// BSD systems have _FAST clocks.
clock_gettime(CLOCK_REALTIME_FAST, &ts);
#else
// And if we find neither, you may wish to consult your system's
// documentation.
#warning Falling back to CLOCK_REALTIME, may be slow.
clock_gettime(CLOCK_REALTIME, &ts);
#endif
return from_timespec(ts);
}
static bool is_zero(const time_point& t) {
return (t == time_point::min());
}
static time_point zero() {
return time_point::min();
}
static time_t to_time_t(const time_point& t) noexcept {
return std::chrono::duration_cast<std::chrono::seconds>(
t.time_since_epoch()).count();
}
static time_point from_time_t(const time_t t) noexcept {
return time_point(std::chrono::seconds(t));
}
static void to_timespec(const time_point& t, struct timespec& ts) {
ts.tv_sec = to_time_t(t);
ts.tv_nsec = (t.time_since_epoch() % std::chrono::seconds(1)).count();
}
static struct timespec to_timespec(const time_point& t) {
struct timespec ts;
to_timespec(t, ts);
return ts;
}
static time_point from_timespec(const struct timespec& ts) {
return time_point(std::chrono::seconds(ts.tv_sec) +
std::chrono::nanoseconds(ts.tv_nsec));
}
static void to_ceph_timespec(const time_point& t,
struct ceph_timespec& ts);
static struct ceph_timespec to_ceph_timespec(const time_point& t);
static time_point from_ceph_timespec(const struct ceph_timespec& ts);
static void to_timeval(const time_point& t, struct timeval& tv) {
tv.tv_sec = to_time_t(t);
tv.tv_usec = std::chrono::duration_cast<std::chrono::microseconds>(
t.time_since_epoch() % std::chrono::seconds(1)).count();
}
static struct timeval to_timeval(const time_point& t) {
struct timeval tv;
to_timeval(t, tv);
return tv;
}
static time_point from_timeval(const struct timeval& tv) {
return time_point(std::chrono::seconds(tv.tv_sec) +
std::chrono::microseconds(tv.tv_usec));
}
static double to_double(const time_point& t) {
return std::chrono::duration<double>(t.time_since_epoch()).count();
}
static time_point from_double(const double d) {
return time_point(std::chrono::duration_cast<duration>(
std::chrono::duration<double>(d)));
}
};
// High-resolution monotonic clock
class mono_clock {
public:
typedef timespan duration;
typedef duration::rep rep;
typedef duration::period period;
typedef std::chrono::time_point<mono_clock> time_point;
static constexpr const bool is_steady = true;
static time_point now() noexcept {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return time_point(std::chrono::seconds(ts.tv_sec) +
std::chrono::nanoseconds(ts.tv_nsec));
}
static bool is_zero(const time_point& t) {
return (t == time_point::min());
}
static time_point zero() {
return time_point::min();
}
};
// Low-resolution but (one would hope, or there is no point to it) faster
// monotonic clock
class coarse_mono_clock {
public:
typedef timespan duration;
typedef duration::rep rep;
typedef duration::period period;
typedef std::chrono::time_point<coarse_mono_clock> time_point;
static constexpr const bool is_steady = true;
static time_point now() noexcept {
struct timespec ts;
#if defined(CLOCK_MONOTONIC_COARSE)
// Linux systems have _COARSE clocks.
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
#elif defined(CLOCK_MONOTONIC_FAST)
// BSD systems have _FAST clocks.
clock_gettime(CLOCK_MONOTONIC_FAST, &ts);
#else
// And if we find neither, you may wish to consult your system's
// documentation.
#warning Falling back to CLOCK_MONOTONIC, may be slow.
clock_gettime(CLOCK_MONOTONIC, &ts);
#endif
return time_point(std::chrono::seconds(ts.tv_sec) +
std::chrono::nanoseconds(ts.tv_nsec));
}
static bool is_zero(const time_point& t) {
return (t == time_point::min());
}
static time_point zero() {
return time_point::min();
}
};
namespace time_detail {
// So that our subtractions produce negative spans rather than
// arithmetic underflow.
template<typename Rep1, typename Period1, typename Rep2,
typename Period2>
inline auto difference(std::chrono::duration<Rep1, Period1> minuend,
std::chrono::duration<Rep2, Period2> subtrahend)
-> typename std::common_type<
std::chrono::duration<typename std::make_signed<Rep1>::type,
Period1>,
std::chrono::duration<typename std::make_signed<Rep2>::type,
Period2> >::type {
  // The common signed-representation duration type of the two operands.
using srep =
typename std::common_type<
std::chrono::duration<typename std::make_signed<Rep1>::type,
Period1>,
std::chrono::duration<typename std::make_signed<Rep2>::type,
Period2> >::type;
return srep(srep(minuend).count() - srep(subtrahend).count());
}
template<typename Clock, typename Duration1, typename Duration2>
inline auto difference(
typename std::chrono::time_point<Clock, Duration1> minuend,
typename std::chrono::time_point<Clock, Duration2> subtrahend)
-> typename std::common_type<
std::chrono::duration<typename std::make_signed<
typename Duration1::rep>::type,
typename Duration1::period>,
std::chrono::duration<typename std::make_signed<
typename Duration2::rep>::type,
typename Duration2::period> >::type {
return difference(minuend.time_since_epoch(),
subtrahend.time_since_epoch());
}
}
// Please note that the coarse clocks are disjoint. You cannot
// subtract a real_clock timepoint from a coarse_real_clock
// timepoint as, from C++'s perspective, they are disjoint types.
// This is not necessarily bad. If I sample a mono_clock and then a
// coarse_mono_clock, the coarse_mono_clock's time could potentially
// be previous to the mono_clock's time (just due to differing
// resolution) which would be Incorrect.
// This is not horrible, though, since you can use an idiom like
// mono_clock::time_point(coarsepoint.time_since_epoch()) to unwrap
// and rewrap if you know what you're doing.
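// Example (illustrative sketch) of the unwrap/rewrap idiom described above:
//
//   auto coarse = ceph::coarse_mono_clock::now();
//   ceph::mono_clock::time_point fine{coarse.time_since_epoch()};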
// Actual wall-clock times
typedef real_clock::time_point real_time;
typedef coarse_real_clock::time_point coarse_real_time;
// Monotonic times should never be serialized or communicated
// between machines, since they are incomparable. Thus we also don't
// make any provision for converting between
// std::chrono::steady_clock time and ceph::mono_clock time.
typedef mono_clock::time_point mono_time;
typedef coarse_mono_clock::time_point coarse_mono_time;
template<typename Rep1, typename Ratio1, typename Rep2, typename Ratio2>
auto floor(const std::chrono::duration<Rep1, Ratio1>& duration,
const std::chrono::duration<Rep2, Ratio2>& precision) ->
typename std::common_type<std::chrono::duration<Rep1, Ratio1>,
std::chrono::duration<Rep2, Ratio2> >::type {
return duration - (duration % precision);
}
template<typename Rep1, typename Ratio1, typename Rep2, typename Ratio2>
auto ceil(const std::chrono::duration<Rep1, Ratio1>& duration,
const std::chrono::duration<Rep2, Ratio2>& precision) ->
typename std::common_type<std::chrono::duration<Rep1, Ratio1>,
std::chrono::duration<Rep2, Ratio2> >::type {
auto tmod = duration % precision;
return duration - tmod + (tmod > tmod.zero() ? 1 : 0) * precision;
}
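// Example (illustrative sketch): rounding a duration to a coarser precision.
//
//   using namespace std::chrono;
//   auto f = ceph::floor(milliseconds(1500), seconds(1)); // 1000ms
//   auto c = ceph::ceil(milliseconds(1500), seconds(1));  // 2000ms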
template<typename Clock, typename Duration, typename Rep, typename Ratio>
auto floor(const std::chrono::time_point<Clock, Duration>& timepoint,
const std::chrono::duration<Rep, Ratio>& precision) ->
std::chrono::time_point<Clock,
typename std::common_type<
Duration, std::chrono::duration<Rep, Ratio>
>::type> {
return std::chrono::time_point<
Clock, typename std::common_type<
Duration, std::chrono::duration<Rep, Ratio> >::type>(
floor(timepoint.time_since_epoch(), precision));
}
template<typename Clock, typename Duration, typename Rep, typename Ratio>
auto ceil(const std::chrono::time_point<Clock, Duration>& timepoint,
const std::chrono::duration<Rep, Ratio>& precision) ->
std::chrono::time_point<Clock,
typename std::common_type<
Duration,
std::chrono::duration<Rep, Ratio> >::type> {
return std::chrono::time_point<
Clock, typename std::common_type<
Duration, std::chrono::duration<Rep, Ratio> >::type>(
ceil(timepoint.time_since_epoch(), precision));
}
inline timespan make_timespan(const double d) {
return std::chrono::duration_cast<timespan>(
std::chrono::duration<double>(d));
}
inline std::optional<timespan> maybe_timespan(const double d) {
return d ? std::make_optional(make_timespan(d)) : std::nullopt;
}
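// Example (illustrative sketch):
//
//   ceph::timespan half = ceph::make_timespan(0.5);      // 500ms
//   auto none = ceph::maybe_timespan(0.0);               // std::nullopt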
template<typename Clock,
typename std::enable_if<!Clock::is_steady>::type* = nullptr>
std::ostream& operator<<(std::ostream& m,
const std::chrono::time_point<Clock>& t);
template<typename Clock,
typename std::enable_if<Clock::is_steady>::type* = nullptr>
std::ostream& operator<<(std::ostream& m,
const std::chrono::time_point<Clock>& t);
// The way std::chrono handles the return type of subtraction is not
// wonderful. The difference of two unsigned types SHOULD be signed.
inline signedspan operator -(real_time minuend,
real_time subtrahend) {
return time_detail::difference(minuend, subtrahend);
}
inline signedspan operator -(coarse_real_time minuend,
coarse_real_time subtrahend) {
return time_detail::difference(minuend, subtrahend);
}
inline signedspan operator -(mono_time minuend,
mono_time subtrahend) {
return time_detail::difference(minuend, subtrahend);
}
inline signedspan operator -(coarse_mono_time minuend,
coarse_mono_time subtrahend) {
return time_detail::difference(minuend, subtrahend);
}
// We could add specializations of time_point - duration and
// time_point + duration to assert on overflow, but I don't think we
// should.
inline timespan abs(signedspan z) {
return z > signedspan::zero() ?
std::chrono::duration_cast<timespan>(z) :
timespan(-z.count());
}
inline timespan to_timespan(signedspan z) {
if (z < signedspan::zero()) {
//ceph_assert(z >= signedspan::zero());
// There is a kernel bug that seems to be triggering this assert. We've
// seen it in:
// centos 8.1: 4.18.0-147.el8.x86_64
// debian 10.3: 4.19.0-8-amd64
// debian 10.1: 4.19.67-2+deb10u1
// ubuntu 18.04
// see bugs:
// https://tracker.ceph.com/issues/43365
// https://tracker.ceph.com/issues/44078
z = signedspan::zero();
}
return std::chrono::duration_cast<timespan>(z);
}
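// Example (illustrative sketch; do_work() is a hypothetical stand-in): timing
// an interval with the monotonic clock. Subtraction yields a signedspan,
// which to_timespan() clamps back to an unsigned timespan.
//
//   auto start = ceph::mono_clock::now();
//   do_work();
//   ceph::signedspan elapsed = ceph::mono_clock::now() - start;
//   ceph::timespan t = ceph::to_timespan(elapsed);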
std::string timespan_str(timespan t);
std::string exact_timespan_str(timespan t);
std::chrono::seconds parse_timespan(const std::string& s);
// detects presence of Clock::to_timespec() and from_timespec()
template <typename Clock, typename = std::void_t<>>
struct converts_to_timespec : std::false_type {};
template <typename Clock>
struct converts_to_timespec<Clock, std::void_t<decltype(
Clock::from_timespec(Clock::to_timespec(
std::declval<typename Clock::time_point>()))
)>> : std::true_type {};
template <typename Clock>
constexpr bool converts_to_timespec_v = converts_to_timespec<Clock>::value;
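// Example (illustrative sketch): the trait can gate clock-generic code, e.g.
//
//   static_assert(ceph::converts_to_timespec_v<ceph::real_clock>);
//   static_assert(!ceph::converts_to_timespec_v<ceph::mono_clock>);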
template<typename Rep, typename T>
static Rep to_seconds(T t) {
return std::chrono::duration_cast<
std::chrono::duration<Rep>>(t).count();
}
template<typename Rep, typename T>
static Rep to_microseconds(T t) {
return std::chrono::duration_cast<
std::chrono::duration<
Rep,
std::micro>>(t).count();
}
} // namespace ceph
namespace std {
template<typename Rep, typename Period>
ostream& operator<<(ostream& m, const chrono::duration<Rep, Period>& t);
}
#if FMT_VERSION >= 90000
template<typename Clock>
struct fmt::formatter<std::chrono::time_point<Clock>> : fmt::ostream_formatter {};
#endif
#endif // COMMON_CEPH_TIME_H
| 18,525 | 32.200717 | 90 | h |
null | ceph-main/src/common/ceph_timer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef COMMON_CEPH_TIMER_H
#define COMMON_CEPH_TIMER_H
#include <cassert>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>
#include <boost/intrusive/set.hpp>
#include "include/function2.hpp"
#include "include/compat.h"
#include "common/detail/construct_suspended.h"
namespace bi = boost::intrusive;
namespace ceph {
// Compared to the SafeTimer this does fewer allocations (you
// don't have to allocate a new Context every time you
// want to cue the next tick.)
//
// It also does not share a lock with the caller. If you call
// cancel event, it either cancels the event (and returns true) or
// you missed it. If this does not work for you, you can set up a
// flag and mutex of your own.
//
// You get to pick your clock. I like mono_clock, since I usually
// want to wait FOR a given duration. real_clock is worthwhile if
// you want to wait UNTIL a specific moment of wallclock time. If
// you want you can set up a timer that executes a function after
// you use up ten seconds of CPU time.
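//
// Example (illustrative sketch):
//
//   ceph::timer<ceph::mono_clock> t;
//   std::uint64_t id = t.add_event(std::chrono::seconds(5),
//                                  [] { /* runs five seconds from now */ });
//   if (!t.cancel_event(id)) {
//     // too late: the event already ran (or was never queued)
//   }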
template<typename TC>
class timer {
using sh = bi::set_member_hook<bi::link_mode<bi::normal_link>>;
struct event {
typename TC::time_point t = typename TC::time_point::min();
std::uint64_t id = 0;
fu2::unique_function<void()> f;
sh schedule_link;
sh event_link;
event() = default;
event(typename TC::time_point t, std::uint64_t id,
fu2::unique_function<void()> f) : t(t), id(id), f(std::move(f)) {}
event(const event&) = delete;
event& operator =(const event&) = delete;
event(event&&) = delete;
event& operator =(event&&) = delete;
bool operator <(const event& e) const noexcept {
return t == e.t ? id < e.id : t < e.t;
}
};
struct id_key {
using type = std::uint64_t;
const type& operator ()(const event& e) const noexcept {
return e.id;
}
};
bi::set<event, bi::member_hook<event, sh, &event::schedule_link>,
bi::constant_time_size<false>> schedule;
bi::set<event, bi::member_hook<event, sh, &event::event_link>,
bi::constant_time_size<false>,
bi::key_of_value<id_key>> events;
std::mutex lock;
std::condition_variable cond;
event* running = nullptr;
std::uint64_t next_id = 0;
bool suspended;
std::thread thread;
void timer_thread() {
std::unique_lock l(lock);
while (!suspended) {
auto now = TC::now();
while (!schedule.empty()) {
auto p = schedule.begin();
// Should we wait for the future?
if (p->t > now)
break;
auto& e = *p;
schedule.erase(e);
events.erase(e.id);
// Since we have only one thread it is impossible to have more
// than one running event
running = &e;
l.unlock();
p->f();
l.lock();
if (running) {
running = nullptr;
delete &e;
} // Otherwise the event requeued itself
}
if (suspended)
break;
if (schedule.empty()) {
cond.wait(l);
} else {
// Since wait_until takes its parameter by reference, passing
// the time /in the event/ is unsafe, as it might be canceled
// while we wait.
const auto t = schedule.begin()->t;
cond.wait_until(l, t);
}
}
}
public:
timer() : suspended(false) {
thread = std::thread(&timer::timer_thread, this);
ceph_pthread_setname(thread.native_handle(), "ceph_timer");
}
// Create a suspended timer, jobs will be executed in order when
// it is resumed.
timer(construct_suspended_t) : suspended(true) {}
timer(const timer&) = delete;
timer& operator =(const timer&) = delete;
~timer() {
suspend();
cancel_all_events();
}
// Suspend operation of the timer (and let its thread die).
void suspend() {
std::unique_lock l(lock);
if (suspended)
return;
suspended = true;
cond.notify_one();
l.unlock();
thread.join();
}
// Resume operation of the timer. (Must have been previously
// suspended.)
void resume() {
std::unique_lock l(lock);
if (!suspended)
return;
suspended = false;
assert(!thread.joinable());
thread = std::thread(&timer::timer_thread, this);
}
// Schedule an event in the relative future
template<typename Callable, typename... Args>
std::uint64_t add_event(typename TC::duration duration,
Callable&& f, Args&&... args) {
return add_event(TC::now() + duration,
std::forward<Callable>(f),
std::forward<Args>(args)...);
}
// Schedule an event in the absolute future
template<typename Callable, typename... Args>
std::uint64_t add_event(typename TC::time_point when,
Callable&& f, Args&&... args) {
std::lock_guard l(lock);
auto e = std::make_unique<event>(when, ++next_id,
std::bind(std::forward<Callable>(f),
std::forward<Args>(args)...));
auto id = e->id;
auto i = schedule.insert(*e);
events.insert(*(e.release()));
/* If the event we have just inserted comes before everything
* else, we need to adjust our timeout. */
if (i.first == schedule.begin())
cond.notify_one();
// Previously each event was a context, identified by a
    // pointer, and each context was called only once. Since you
// can queue the same function pointer, member function,
// lambda, or functor up multiple times, identifying things by
// function for the purposes of cancellation is no longer
// suitable. Thus:
return id;
}
// Adjust the timeout of a currently-scheduled event (relative)
bool adjust_event(std::uint64_t id, typename TC::duration duration) {
return adjust_event(id, TC::now() + duration);
}
// Adjust the timeout of a currently-scheduled event (absolute)
bool adjust_event(std::uint64_t id, typename TC::time_point when) {
std::lock_guard l(lock);
auto it = events.find(id);
if (it == events.end())
return false;
auto& e = *it;
schedule.erase(e);
e.t = when;
schedule.insert(e);
return true;
}
// Cancel an event. If the event has already come and gone (or you
// never submitted it) you will receive false. Otherwise you will
// receive true and it is guaranteed the event will not execute.
bool cancel_event(const std::uint64_t id) {
std::lock_guard l(lock);
auto p = events.find(id);
if (p == events.end()) {
return false;
}
auto& e = *p;
events.erase(e.id);
schedule.erase(e);
delete &e;
return true;
}
// Reschedules a currently running event in the relative
// future. Must be called only from an event executed by this
// timer. If you have a function that can be called either from
// this timer or some other way, it is your responsibility to make
  // sure it can tell the difference and does not call
// reschedule_me in the non-timer case.
//
// Returns an event id. If you had an event_id from the first
// scheduling, replace it with this return value.
std::uint64_t reschedule_me(typename TC::duration duration) {
return reschedule_me(TC::now() + duration);
}
// Reschedules a currently running event in the absolute
// future. Must be called only from an event executed by this
  // timer. If you have a function that can be called either from
  // this timer or some other way, it is your responsibility to make
  // sure it can tell the difference and does not call
// reschedule_me in the non-timer case.
//
// Returns an event id. If you had an event_id from the first
// scheduling, replace it with this return value.
std::uint64_t reschedule_me(typename TC::time_point when) {
assert(std::this_thread::get_id() == thread.get_id());
std::lock_guard l(lock);
running->t = when;
std::uint64_t id = ++next_id;
running->id = id;
schedule.insert(*running);
events.insert(*running);
// Hacky, but keeps us from being deleted
running = nullptr;
// Same function, but you get a new ID.
return id;
}
// Remove all events from the queue.
void cancel_all_events() {
std::lock_guard l(lock);
while (!events.empty()) {
auto p = events.begin();
event& e = *p;
schedule.erase(e);
events.erase(e.id);
delete &e;
}
}
}; // timer
} // namespace ceph
#endif
| 8,643 | 26.616613 | 71 | h |
null | ceph-main/src/common/cmdparse.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_COMMON_CMDPARSE_H
#define CEPH_COMMON_CMDPARSE_H
#include <vector>
#include <stdexcept>
#include <optional>
#include <ostream>
#include <boost/variant.hpp>
#include "include/ceph_assert.h" // boost clobbers this
#include "include/common_fwd.h"
#include "common/Formatter.h"
#include "common/BackTrace.h"
typedef boost::variant<std::string,
bool,
int64_t,
double,
std::vector<std::string>,
std::vector<int64_t>,
std::vector<double>> cmd_vartype;
typedef std::map<std::string, cmd_vartype, std::less<>> cmdmap_t;
namespace ceph::common {
std::string cmddesc_get_prefix(const std::string_view &cmddesc);
std::string cmddesc_get_prenautilus_compat(const std::string &cmddesc);
void dump_cmd_to_json(ceph::Formatter *f, uint64_t features,
const std::string& cmd);
void dump_cmd_and_help_to_json(ceph::Formatter *f,
uint64_t features,
const std::string& secname,
const std::string& cmd,
const std::string& helptext);
void dump_cmddesc_to_json(ceph::Formatter *jf,
uint64_t features,
const std::string& secname,
const std::string& cmdsig,
const std::string& helptext,
const std::string& module,
const std::string& perm,
uint64_t flags);
bool cmdmap_from_json(const std::vector<std::string>& cmd, cmdmap_t *mapp,
std::ostream& ss);
void cmdmap_dump(const cmdmap_t &cmdmap, ceph::Formatter *f);
void handle_bad_get(CephContext *cct, const std::string& k, const char *name);
std::string cmd_vartype_stringify(const cmd_vartype& v);
struct bad_cmd_get : public std::exception {
std::string desc;
bad_cmd_get(std::string_view f, const cmdmap_t& cmdmap) {
desc += "bad or missing field '";
desc += f;
desc += "'";
}
const char *what() const throw() override {
return desc.c_str();
}
};
bool cmd_getval(const cmdmap_t& cmdmap,
std::string_view k, bool& val);
bool cmd_getval_compat_cephbool(
const cmdmap_t& cmdmap,
const std::string& k, bool& val);
template <typename T>
bool cmd_getval(const cmdmap_t& cmdmap,
std::string_view k, T& val)
{
auto found = cmdmap.find(k);
if (found == cmdmap.end()) {
return false;
}
try {
val = boost::get<T>(found->second);
return true;
} catch (boost::bad_get&) {
throw bad_cmd_get(k, cmdmap);
}
}
template <typename T>
std::optional<T> cmd_getval(const cmdmap_t& cmdmap,
std::string_view k)
{
T ret;
if (const bool found = cmd_getval(cmdmap, k, ret); found) {
return std::make_optional(std::move(ret));
} else {
return std::nullopt;
}
}
// with default
template <typename T, typename V>
T cmd_getval_or(const cmdmap_t& cmdmap, std::string_view k,
const V& defval)
{
auto found = cmdmap.find(k);
if (found == cmdmap.end()) {
return T(defval);
}
try {
return boost::get<T>(cmdmap.find(k)->second);
} catch (boost::bad_get&) {
throw bad_cmd_get(k, cmdmap);
}
}
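// Example (illustrative sketch; the field names are hypothetical): pulling
// typed values out of a parsed cmdmap_t.
//
//   std::string pool;
//   if (!cmd_getval(cmdmap, "pool", pool)) {
//     // "pool" was absent; a type mismatch would have thrown bad_cmd_get
//   }
//   int64_t count = cmd_getval_or<int64_t>(cmdmap, "count", 1);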
template <typename T>
void
cmd_putval(CephContext *cct, cmdmap_t& cmdmap, std::string_view k, const T& val)
{
cmdmap.insert_or_assign(std::string{k}, val);
}
bool validate_cmd(const std::string& desc,
const cmdmap_t& cmdmap,
std::ostream& os);
extern int parse_osd_id(const char *s, std::ostream *pss);
extern long parse_pos_long(const char *s, std::ostream *pss = NULL);
}
#endif
| 3,517 | 26.271318 | 80 | h |
null | ceph-main/src/common/code_environment.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_CODE_ENVIRONMENT_H
#define CEPH_COMMON_CODE_ENVIRONMENT_H
enum code_environment_t {
CODE_ENVIRONMENT_UTILITY = 0,
CODE_ENVIRONMENT_DAEMON = 1,
CODE_ENVIRONMENT_LIBRARY = 2,
CODE_ENVIRONMENT_UTILITY_NODOUT = 3,
};
#ifdef __cplusplus
#include <iosfwd>
#include <string>
extern "C" code_environment_t g_code_env;
extern "C" const char *code_environment_to_str(enum code_environment_t e);
std::ostream &operator<<(std::ostream &oss, const enum code_environment_t e);
extern "C" int get_process_name(char *buf, int len);
std::string get_process_name_cpp();
#else
extern code_environment_t g_code_env;
const char *code_environment_to_str(const enum code_environment_t e);
extern int get_process_name(char *buf, int len);
#endif
#endif
| 1,173 | 25.681818 | 77 | h |
null | ceph-main/src/common/cohort_lru.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Copyright (C) 2015 CohortFS, LLC.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef COHORT_LRU_H
#define COHORT_LRU_H
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/slist.hpp>
#include "common/likely.h"
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 /* XXX arch-specific define */
#endif
#define CACHE_PAD(_n) char __pad ## _n [CACHE_LINE_SIZE]
namespace cohort {
namespace lru {
namespace bi = boost::intrusive;
/* public flag values */
constexpr uint32_t FLAG_NONE = 0x0000;
constexpr uint32_t FLAG_INITIAL = 0x0001;
constexpr uint32_t FLAG_RECYCLE = 0x0002;
enum class Edge : std::uint8_t
{
MRU = 0,
LRU
};
typedef bi::link_mode<bi::safe_link> link_mode;
class ObjectFactory; // Forward declaration
class Object
{
private:
uint32_t lru_flags;
std::atomic<uint32_t> lru_refcnt;
std::atomic<uint32_t> lru_adj;
bi::list_member_hook< link_mode > lru_hook;
typedef bi::list<Object,
bi::member_hook<
Object, bi::list_member_hook< link_mode >,
&Object::lru_hook >,
bi::constant_time_size<true>> Queue;
bi::slist_member_hook< link_mode > q2_hook;
typedef bi::slist<Object,
bi::member_hook<
Object, bi::slist_member_hook< link_mode >,
&Object::q2_hook >,
bi::constant_time_size<true>> Queue2;
public:
Object() : lru_flags(FLAG_NONE), lru_refcnt(0), lru_adj(0) {}
uint32_t get_refcnt() const { return lru_refcnt; }
virtual bool reclaim(const ObjectFactory* newobj_fac) = 0;
virtual ~Object() {}
private:
template <typename LK>
friend class LRU;
template <typename T, typename TTree, typename CLT, typename CEQ,
typename K, typename LK>
friend class TreeX;
};
/* allocator & recycler interface (create or re-use LRU objects) */
class ObjectFactory
{
public:
virtual Object* alloc(void) = 0;
virtual void recycle(Object*) = 0;
virtual ~ObjectFactory() {};
};
template <typename LK>
class LRU
{
private:
struct Lane {
LK lock;
Object::Queue q;
// Object::Queue pinned; /* placeholder for possible expansion */
CACHE_PAD(0);
Lane() {}
};
Lane *qlane;
int n_lanes;
std::atomic<uint32_t> evict_lane;
const uint32_t lane_hiwat;
static constexpr uint32_t lru_adj_modulus = 5;
static constexpr uint32_t SENTINEL_REFCNT = 1;
/* internal flag values */
static constexpr uint32_t FLAG_INLRU = 0x0001;
static constexpr uint32_t FLAG_PINNED = 0x0002; // possible future use
static constexpr uint32_t FLAG_EVICTING = 0x0004;
Lane& lane_of(void* addr) {
return qlane[(uint64_t)(addr) % n_lanes];
}
uint32_t next_evict_lane() {
return (evict_lane++ % n_lanes);
}
bool can_reclaim(Object* o) {
return ((o->lru_refcnt == SENTINEL_REFCNT) &&
(!(o->lru_flags & FLAG_EVICTING)));
}
Object* evict_block(const ObjectFactory* newobj_fac) {
uint32_t lane_ix = next_evict_lane();
for (int ix = 0; ix < n_lanes; ++ix,
lane_ix = next_evict_lane()) {
Lane& lane = qlane[lane_ix];
std::unique_lock lane_lock{lane.lock};
/* if object at LRU has refcnt==1, it may be reclaimable */
Object* o = &(lane.q.back());
if (can_reclaim(o)) {
++(o->lru_refcnt);
o->lru_flags |= FLAG_EVICTING;
lane_lock.unlock();
if (o->reclaim(newobj_fac)) {
lane_lock.lock();
--(o->lru_refcnt);
/* assertions that o state has not changed across
* relock */
ceph_assert(o->lru_refcnt == SENTINEL_REFCNT);
ceph_assert(o->lru_flags & FLAG_INLRU);
Object::Queue::iterator it =
Object::Queue::s_iterator_to(*o);
lane.q.erase(it);
return o;
} else {
--(o->lru_refcnt);
o->lru_flags &= ~FLAG_EVICTING;
/* unlock in next block */
}
} /* can_reclaim(o) */
} /* each lane */
return nullptr;
} /* evict_block */
public:
LRU(int lanes, uint32_t _hiwat)
: n_lanes(lanes), evict_lane(0), lane_hiwat(_hiwat)
{
ceph_assert(n_lanes > 0);
qlane = new Lane[n_lanes];
}
~LRU() { delete[] qlane; }
bool ref(Object* o, uint32_t flags) {
++(o->lru_refcnt);
if (flags & FLAG_INITIAL) {
if ((++(o->lru_adj) % lru_adj_modulus) == 0) {
Lane& lane = lane_of(o);
lane.lock.lock();
/* move to MRU */
Object::Queue::iterator it =
Object::Queue::s_iterator_to(*o);
lane.q.erase(it);
lane.q.push_front(*o);
lane.lock.unlock();
} /* adj */
} /* initial ref */
return true;
} /* ref */
void unref(Object* o, uint32_t flags) {
uint32_t refcnt = --(o->lru_refcnt);
Object* tdo = nullptr;
if (unlikely(refcnt == 0)) {
Lane& lane = lane_of(o);
lane.lock.lock();
refcnt = o->lru_refcnt.load();
if (unlikely(refcnt == 0)) {
Object::Queue::iterator it =
Object::Queue::s_iterator_to(*o);
lane.q.erase(it);
tdo = o;
}
lane.lock.unlock();
} else if (unlikely(refcnt == SENTINEL_REFCNT)) {
Lane& lane = lane_of(o);
lane.lock.lock();
refcnt = o->lru_refcnt.load();
if (likely(refcnt == SENTINEL_REFCNT)) {
/* move to LRU */
Object::Queue::iterator it =
Object::Queue::s_iterator_to(*o);
lane.q.erase(it);
/* hiwat check */
if (lane.q.size() > lane_hiwat) {
tdo = o;
} else {
lane.q.push_back(*o);
}
}
lane.lock.unlock();
}
/* unref out-of-line && !LOCKED */
if (tdo)
delete tdo;
} /* unref */
Object* insert(ObjectFactory* fac, Edge edge, uint32_t& flags) {
/* use supplied functor to re-use an evicted object, or
* allocate a new one of the descendant type */
Object* o = evict_block(fac);
if (o) {
fac->recycle(o); /* recycle existing object */
flags |= FLAG_RECYCLE;
}
else
o = fac->alloc(); /* get a new one */
o->lru_flags = FLAG_INLRU;
Lane& lane = lane_of(o);
lane.lock.lock();
switch (edge) {
case Edge::MRU:
lane.q.push_front(*o);
break;
case Edge::LRU:
lane.q.push_back(*o);
break;
default:
ceph_abort();
break;
}
if (flags & FLAG_INITIAL)
o->lru_refcnt += 2; /* sentinel ref + initial */
else
++(o->lru_refcnt); /* sentinel */
lane.lock.unlock();
return o;
} /* insert */
};
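// Example (illustrative sketch; the Thing and ThingFactory names are
// hypothetical):
//
//   struct Thing : public cohort::lru::Object {
//     bool reclaim(const cohort::lru::ObjectFactory*) override {
//       return true; // nothing pins this object, safe to evict
//     }
//   };
//
//   struct ThingFactory : public cohort::lru::ObjectFactory {
//     cohort::lru::Object* alloc() override { return new Thing(); }
//     void recycle(cohort::lru::Object* o) override { /* reset *o */ }
//   };
//
//   cohort::lru::LRU<std::mutex> cache(7 /* lanes */, 256 /* per-lane hiwat */);
//   ThingFactory fac;
//   uint32_t flags = cohort::lru::FLAG_INITIAL;
//   auto* t = static_cast<Thing*>(
//     cache.insert(&fac, cohort::lru::Edge::MRU, flags));
//   // ... use t ...
//   cache.unref(t, cohort::lru::FLAG_NONE);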
template <typename T, typename TTree, typename CLT, typename CEQ,
typename K, typename LK>
class TreeX
{
public:
static constexpr uint32_t FLAG_NONE = 0x0000;
static constexpr uint32_t FLAG_LOCK = 0x0001;
static constexpr uint32_t FLAG_UNLOCK = 0x0002;
static constexpr uint32_t FLAG_UNLOCK_ON_MISS = 0x0004;
typedef T value_type;
typedef TTree container_type;
typedef typename TTree::iterator iterator;
typedef std::pair<iterator, bool> check_result;
typedef typename TTree::insert_commit_data insert_commit_data;
int n_part;
int csz;
typedef std::unique_lock<LK> unique_lock;
struct Partition {
LK lock;
TTree tr;
T** cache;
int csz;
CACHE_PAD(0);
Partition() : tr(), cache(nullptr), csz(0) {}
~Partition() {
if (csz)
::operator delete(cache);
}
};
struct Latch {
Partition* p;
LK* lock;
insert_commit_data commit_data{};
Latch() : p(nullptr), lock(nullptr) {}
};
Partition& partition_of_scalar(uint64_t x) {
return part[x % n_part];
}
Partition& get(uint8_t x) {
return part[x];
}
Partition*& get() {
return part;
}
void lock() {
std::for_each(locks.begin(), locks.end(),
[](LK* lk){ lk->lock(); });
}
void unlock() {
std::for_each(locks.begin(), locks.end(),
[](LK* lk){ lk->unlock(); });
}
TreeX(int n_part=1, int csz=127) : n_part(n_part), csz(csz) {
ceph_assert(n_part > 0);
part = new Partition[n_part];
for (int ix = 0; ix < n_part; ++ix) {
Partition& p = part[ix];
if (csz) {
p.csz = csz;
p.cache = (T**) ::operator new(csz * sizeof(T*));
// FIPS zeroization audit 20191115: this memset is not security related.
memset(p.cache, 0, csz * sizeof(T*));
}
locks.push_back(&p.lock);
}
}
~TreeX() {
delete[] part;
}
T* find(uint64_t hk, const K& k, uint32_t flags) {
T* v;
Latch lat;
uint32_t slot = 0;
lat.p = &(partition_of_scalar(hk));
if (flags & FLAG_LOCK) {
lat.lock = &lat.p->lock;
lat.lock->lock();
}
if (csz) { /* template specialize? */
slot = hk % csz;
v = lat.p->cache[slot];
if (v) {
if (CEQ()(*v, k)) {
if (flags & FLAG_LOCK)
lat.lock->unlock();
return v;
}
v = nullptr;
}
} else {
v = nullptr;
}
iterator it = lat.p->tr.find(k, CLT());
if (it != lat.p->tr.end()){
v = &(*(it));
if (csz) {
/* fill cache slot at hk */
lat.p->cache[slot] = v;
}
}
if (flags & FLAG_LOCK)
lat.lock->unlock();
return v;
} /* find */
T* find_latch(uint64_t hk, const K& k, Latch& lat,
uint32_t flags) {
uint32_t slot = 0;
T* v;
lat.p = &(partition_of_scalar(hk));
lat.lock = &lat.p->lock;
if (flags & FLAG_LOCK)
lat.lock->lock();
if (csz) { /* template specialize? */
slot = hk % csz;
v = lat.p->cache[slot];
if (v) {
if (CEQ()(*v, k)) {
if ((flags & FLAG_LOCK) && (flags & FLAG_UNLOCK))
lat.lock->unlock();
return v;
}
v = nullptr;
}
} else {
v = nullptr;
}
check_result r = lat.p->tr.insert_unique_check(
k, CLT(), lat.commit_data);
if (! r.second /* !insertable (i.e., !found) */) {
v = &(*(r.first));
if (csz) {
/* fill cache slot at hk */
lat.p->cache[slot] = v;
}
}
if ((flags & FLAG_LOCK) && (flags & FLAG_UNLOCK))
lat.lock->unlock();
return v;
} /* find_latch */
bool is_same_partition(uint64_t lhs, uint64_t rhs) {
return ((lhs % n_part) == (rhs % n_part));
}
void insert_latched(T* v, Latch& lat, uint32_t flags) {
(void) lat.p->tr.insert_unique_commit(*v, lat.commit_data);
if (flags & FLAG_UNLOCK)
lat.lock->unlock();
} /* insert_latched */
void insert(uint64_t hk, T* v, uint32_t flags) {
Partition& p = partition_of_scalar(hk);
if (flags & FLAG_LOCK)
p.lock.lock();
p.tr.insert_unique(*v);
if (flags & FLAG_LOCK)
p.lock.unlock();
} /* insert */
void remove(uint64_t hk, T* v, uint32_t flags) {
Partition& p = partition_of_scalar(hk);
iterator it = TTree::s_iterator_to(*v);
if (flags & FLAG_LOCK)
p.lock.lock();
p.tr.erase(it);
if (csz) { /* template specialize? */
uint32_t slot = hk % csz;
T* v2 = p.cache[slot];
/* we are intrusive, just compare addresses */
if (v == v2)
p.cache[slot] = nullptr;
}
if (flags & FLAG_LOCK)
p.lock.unlock();
} /* remove */
void drain(std::function<void(T*)> uref,
uint32_t flags = FLAG_NONE) {
/* clear a table, call supplied function on
* each element found (e.g., returns sentinel
* references) */
Object::Queue2 drain_q;
for (int t_ix = 0; t_ix < n_part; ++t_ix) {
Partition& p = part[t_ix];
if (flags & FLAG_LOCK) /* LOCKED */
p.lock.lock();
while (p.tr.size() > 0) {
iterator it = p.tr.begin();
T* v = &(*it);
p.tr.erase(it);
drain_q.push_front(*v);
}
if (flags & FLAG_LOCK) /* we locked it, !LOCKED */
p.lock.unlock();
} /* each partition */
/* unref out-of-line && !LOCKED */
while (drain_q.size() > 0) {
Object::Queue2::iterator it = drain_q.begin();
T* v = static_cast<T*>(&(*it));
drain_q.erase(it); /* must precede uref(v) in safe_link mode */
uref(v);
}
} /* drain */
private:
Partition *part;
std::vector<LK*> locks;
};
} /* namespace LRU */
} /* namespace cohort */
#endif /* COHORT_LRU_H */
| 12,135 | 23.320641 | 77 | h |
null | ceph-main/src/common/common_init.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_INIT_H
#define CEPH_COMMON_INIT_H
#include <deque>
#include "include/common_fwd.h"
#include "common/code_environment.h"
enum common_init_flags_t {
// Set up defaults that make sense for an unprivileged daemon
CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS = 0x1,
// By default, don't read a configuration file OR contact mons
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE = 0x2,
// Don't close stderr (in daemonize)
CINIT_FLAG_NO_CLOSE_STDERR = 0x4,
// don't do anything daemonish, like create /var/run/ceph, or print a banner
CINIT_FLAG_NO_DAEMON_ACTIONS = 0x8,
// don't drop privileges
CINIT_FLAG_DEFER_DROP_PRIVILEGES = 0x10,
// don't contact mons for config
CINIT_FLAG_NO_MON_CONFIG = 0x20,
// don't expose default cct perf counters
CINIT_FLAG_NO_CCT_PERF_COUNTERS = 0x40,
};
#ifndef WITH_SEASTAR
class CephInitParameters;
/*
* NOTE: If you are writing a Ceph daemon, ignore this function and call
* global_init instead. It will call common_preinit for you.
*
* common_preinit creates the CephContext.
*
* After this function gives you a CephContext, you need to set up the
* Ceph configuration, which lives inside the CephContext as md_config_t.
* The initial settings are not very useful because they do not reflect what
* the user asked for.
*
* This is usually done by something like this:
* cct->_conf.parse_env();
* cct->_conf.apply_changes();
*
* Your library may also supply functions to read a configuration file.
*/
CephContext *common_preinit(const CephInitParameters &iparams,
enum code_environment_t code_env, int flags);
#endif // #ifndef WITH_SEASTAR
/* Print out some parse error. */
void complain_about_parse_error(CephContext *cct,
const std::string& parse_error);
/* This function is called after you have done your last
* fork. When you make this call, the system will initialize everything that
* cannot be initialized before a fork.
*
* This includes things like starting threads, initializing libraries that
* can't handle forking, and so forth.
*
* If you are writing a Ceph library, you can call this pretty much any time.
* We do not allow our library users to fork and continue using the Ceph
* libraries. The most obvious reason for this is that the threads started by
* the Ceph libraries would be destroyed by a fork().
*/
void common_init_finish(CephContext *cct);
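/* Example (illustrative sketch only; the CephInitParameters setup and error
 * handling are elided, and exact call signatures may differ by release):
 *
 * CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
 * cct->_conf.parse_env();       // as described above
 * cct->_conf.apply_changes();
 * common_init_finish(cct);
 */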
#endif
| 2,802 | 30.494382 | 78 | h |
null | ceph-main/src/common/compiler_extensions.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMPILER_EXTENSIONS_H
#define CEPH_COMPILER_EXTENSIONS_H
/* We should be able to take advantage of nice nonstandard features of gcc
* and other compilers, but still maintain portability.
*/
#ifdef __GNUC__
// GCC
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
// some other compiler - just make it a no-op
#define WARN_UNUSED_RESULT
#endif
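/* Example (illustrative sketch):
 *
 * int reserve_slot() WARN_UNUSED_RESULT; // callers must check the result
 */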
#endif
| 796 | 24.709677 | 74 | h |
null | ceph-main/src/common/condition_variable_debug.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <condition_variable>
#include <ctime>
#include <pthread.h>
#include "common/ceph_time.h"
namespace ceph {
namespace mutex_debug_detail {
template<bool> class mutex_debug_impl;
}
class condition_variable_debug {
using mutex_debug = mutex_debug_detail::mutex_debug_impl<false>;
pthread_cond_t cond;
mutex_debug* waiter_mutex;
condition_variable_debug&
operator=(const condition_variable_debug&) = delete;
condition_variable_debug(const condition_variable_debug&) = delete;
public:
condition_variable_debug();
~condition_variable_debug();
void wait(std::unique_lock<mutex_debug>& lock);
template<class Predicate>
void wait(std::unique_lock<mutex_debug>& lock, Predicate pred) {
while (!pred()) {
wait(lock);
}
}
template<class Clock, class Duration>
std::cv_status wait_until(
std::unique_lock<mutex_debug>& lock,
const std::chrono::time_point<Clock, Duration>& when) {
if constexpr (Clock::is_steady) {
// convert from mono_clock to real_clock
auto real_when = ceph::real_clock::now();
const auto delta = when - Clock::now();
real_when += std::chrono::ceil<typename Clock::duration>(delta);
timespec ts = ceph::real_clock::to_timespec(real_when);
return _wait_until(lock.mutex(), &ts);
} else {
timespec ts = Clock::to_timespec(when);
return _wait_until(lock.mutex(), &ts);
}
}
template<class Rep, class Period>
std::cv_status wait_for(
std::unique_lock<mutex_debug>& lock,
const std::chrono::duration<Rep, Period>& awhile) {
ceph::real_time when{ceph::real_clock::now()};
when += awhile;
timespec ts = ceph::real_clock::to_timespec(when);
return _wait_until(lock.mutex(), &ts);
}
template<class Rep, class Period, class Pred>
bool wait_for(
std::unique_lock<mutex_debug>& lock,
const std::chrono::duration<Rep, Period>& awhile,
Pred pred) {
ceph::real_time when{ceph::real_clock::now()};
when += awhile;
timespec ts = ceph::real_clock::to_timespec(when);
while (!pred()) {
if ( _wait_until(lock.mutex(), &ts) == std::cv_status::timeout) {
return pred();
}
}
return true;
}
void notify_one();
void notify_all(bool sloppy = false);
private:
std::cv_status _wait_until(mutex_debug* mutex, timespec* ts);
};
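// Example (illustrative sketch; see common/mutex_debug.h for the mutex_debug
// constructor):
//
//   ceph::mutex_debug m{"example::lock"};
//   ceph::condition_variable_debug cond;
//   bool ready = false;
//
//   std::unique_lock<ceph::mutex_debug> l{m};
//   cond.wait_for(l, std::chrono::seconds(1), [&] { return ready; });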
} // namespace ceph
| 2,464 | 28.345238 | 71 | h |
null | ceph-main/src/common/fd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Inktank
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_FD_H
#define CEPH_COMMON_FD_H
#include "include/common_fwd.h"
void dump_open_fds(CephContext *cct);
#endif
| 534 | 22.26087 | 70 | h |
null | ceph-main/src/common/config.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONFIG_H
#define CEPH_CONFIG_H
#include <map>
#include <variant>
#include <boost/container/small_vector.hpp>
#include "common/ConfUtils.h"
#include "common/code_environment.h"
#include "log/SubsystemMap.h"
#include "common/options.h"
#include "common/subsys_types.h"
#include "common/config_tracker.h"
#include "common/config_values.h"
#include "include/common_fwd.h"
enum {
CONF_DEFAULT,
CONF_MON,
CONF_FILE,
CONF_ENV,
CONF_CMDLINE,
CONF_OVERRIDE,
CONF_FINAL
};
extern const char *ceph_conf_level_name(int level);
/** This class represents the current Ceph configuration.
*
* For Ceph daemons, this is the daemon configuration. Log levels, caching
* settings, btrfs settings, and so forth can all be found here. For libcephfs
* and librados users, this is the configuration associated with their context.
*
* For information about how this class is loaded from a configuration file,
* see common/ConfUtils.
*
* ACCESS
*
 * There are 3 ways to read the ceph configuration -- the old way and two new ways.
* In the old way, code would simply read the public variables of the
* configuration, without taking a lock. In the new way #1, code registers a
* configuration observer which receives callbacks when a value changes. These
* callbacks take place under the md_config_t lock. Alternatively one can use
* get_val(const char *name) method to safely get a copy of the value.
*
* To prevent serious problems resulting from thread-safety issues, we disallow
* changing std::string configuration values after
* md_config_t::safe_to_start_threads becomes true. You can still
 * change integer or floating point values, and the options declared with the
 * SAFE_OPTION macro. Notice the latter options cannot be read directly
 * (conf->foo); one should use either observers or the get_val() method
* (conf->get_val("foo")).
*
* FIXME: really we shouldn't allow changing integer or floating point values
* while another thread is reading them, either.
*/
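/* Example (illustrative sketch; typically reached through the ConfigProxy at
 * cct->_conf, and "my_option" is a hypothetical option name):
 *
 * uint64_t v = cct->_conf.get_val<uint64_t>("my_option");
 */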
struct md_config_t {
public:
typedef std::variant<int64_t ConfigValues::*,
uint64_t ConfigValues::*,
std::string ConfigValues::*,
double ConfigValues::*,
bool ConfigValues::*,
entity_addr_t ConfigValues::*,
entity_addrvec_t ConfigValues::*,
uuid_d ConfigValues::*> member_ptr_t;
// For use when intercepting configuration updates
typedef std::function<bool(
const std::string &k, const std::string &v)> config_callback;
/// true if we are a daemon (as per CephContext::code_env)
const bool is_daemon;
/*
* Mapping from legacy config option names to class members
*/
std::map<std::string_view, member_ptr_t> legacy_values;
/**
* The configuration schema, in the form of Option objects describing
* possible settings.
*/
std::map<std::string_view, const Option&> schema;
/// values from mon that we failed to set
std::map<std::string,std::string> ignored_mon_values;
  /// original raw values that may need to be re-expanded at certain times
mutable std::vector<std::string> may_reexpand_meta;
  /// encoded, cached copy of values + ignored_mon_values
ceph::bufferlist values_bl;
/// version for values_bl; increments each time there is a change
uint64_t values_bl_version = 0;
/// encoded copy of defaults (map<string,string>)
ceph::bufferlist defaults_bl;
// Create a new md_config_t structure.
explicit md_config_t(ConfigValues& values,
const ConfigTracker& tracker,
bool is_daemon=false);
~md_config_t();
// Parse a config file
int parse_config_files(ConfigValues& values, const ConfigTracker& tracker,
const char *conf_files,
std::ostream *warnings, int flags);
int parse_buffer(ConfigValues& values, const ConfigTracker& tracker,
const char* buf, size_t len,
std::ostream *warnings);
void update_legacy_vals(ConfigValues& values);
// Absorb config settings from the environment
void parse_env(unsigned entity_type,
ConfigValues& values, const ConfigTracker& tracker,
const char *env_var = "CEPH_ARGS");
// Absorb config settings from argv
int parse_argv(ConfigValues& values, const ConfigTracker& tracker,
std::vector<const char*>& args, int level=CONF_CMDLINE);
// do any commands we got from argv (--show-config, --show-config-val)
void do_argv_commands(const ConfigValues& values) const;
bool _internal_field(const std::string& k);
void set_safe_to_start_threads();
void _clear_safe_to_start_threads(); // this is only used by the unit test
/// Look up an option in the schema
const Option *find_option(const std::string_view name) const;
/// Set a default value
void set_val_default(ConfigValues& values,
const ConfigTracker& tracker,
const std::string_view key, const std::string &val);
/// Set a values from mon
int set_mon_vals(CephContext *cct,
ConfigValues& values,
const ConfigTracker& tracker,
const std::map<std::string,std::string, std::less<>>& kv,
config_callback config_cb);
// Called by the Ceph daemons to make configuration changes at runtime
int injectargs(ConfigValues& values,
const ConfigTracker& tracker,
const std::string &s,
std::ostream *oss);
// Set a configuration value, or crash
// Metavariables will be expanded.
void set_val_or_die(ConfigValues& values, const ConfigTracker& tracker,
const std::string_view key, const std::string &val);
// Set a configuration value.
// Metavariables will be expanded.
int set_val(ConfigValues& values, const ConfigTracker& tracker,
const std::string_view key, const char *val,
std::stringstream *err_ss=nullptr);
int set_val(ConfigValues& values, const ConfigTracker& tracker,
const std::string_view key, const std::string& s,
std::stringstream *err_ss=nullptr) {
return set_val(values, tracker, key, s.c_str(), err_ss);
}
/// clear override value
int rm_val(ConfigValues& values, const std::string_view key);
/// get encoded map<string,map<int32_t,string>> of entire config
void get_config_bl(const ConfigValues& values,
uint64_t have_version,
ceph::buffer::list *bl,
uint64_t *got_version);
/// get encoded map<string,string> of compiled-in defaults
void get_defaults_bl(const ConfigValues& values, ceph::buffer::list *bl);
/// Get the default value of a configuration option
std::optional<std::string> get_val_default(std::string_view key);
// Get a configuration value.
// No metavariables will be returned (they will have already been expanded)
int get_val(const ConfigValues& values, const std::string_view key, char **buf, int len) const;
int get_val(const ConfigValues& values, const std::string_view key, std::string *val) const;
template<typename T> const T get_val(const ConfigValues& values, const std::string_view key) const;
template<typename T, typename Callback, typename...Args>
auto with_val(const ConfigValues& values, const std::string_view key,
Callback&& cb, Args&&... args) const ->
std::result_of_t<Callback(const T&, Args...)> {
return std::forward<Callback>(cb)(
std::get<T>(this->get_val_generic(values, key)),
std::forward<Args>(args)...);
}
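  // e.g. (illustrative sketch; "my_str_option" is a hypothetical string option):
  //   size_t n = conf.with_val<std::string>(values, "my_str_option",
  //                                         [](const std::string& s) {
  //                                           return s.size();
  //                                         });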
void get_all_keys(std::vector<std::string> *keys) const;
// Return a list of all the sections that the current entity is a member of.
std::vector<std::string> get_my_sections(const ConfigValues& values) const;
// Return a list of all sections
int get_all_sections(std::vector <std::string> §ions) const;
// Get a value from the configuration file that we read earlier.
// Metavariables will be expanded if emeta is true.
int get_val_from_conf_file(const ConfigValues& values,
const std::vector <std::string> §ions,
const std::string_view key, std::string &out, bool emeta) const;
/// dump all config values to a stream
void show_config(const ConfigValues& values, std::ostream& out) const;
/// dump all config values to a formatter
void show_config(const ConfigValues& values, ceph::Formatter *f) const;
/// dump all config settings to a formatter
void config_options(ceph::Formatter *f) const;
/// dump config diff from default, conf, mon, etc.
void diff(const ConfigValues& values,
ceph::Formatter *f,
std::string name = {}) const;
/// print/log warnings/errors from parsing the config
void complain_about_parse_error(CephContext *cct);
private:
// we use this to avoid variable expansion loops
typedef boost::container::small_vector<std::pair<const Option*,
const Option::value_t*>,
4> expand_stack_t;
void validate_schema();
void validate_default_settings();
Option::value_t get_val_generic(const ConfigValues& values,
const std::string_view key) const;
int _get_val_cstr(const ConfigValues& values,
const std::string& key, char **buf, int len) const;
Option::value_t _get_val(const ConfigValues& values,
const std::string_view key,
expand_stack_t *stack=0,
std::ostream *err=0) const;
Option::value_t _get_val(const ConfigValues& values,
const Option& o,
expand_stack_t *stack=0,
std::ostream *err=0) const;
const Option::value_t& _get_val_default(const Option& o) const;
Option::value_t _get_val_nometa(const ConfigValues& values,
const Option& o) const;
int _rm_val(ConfigValues& values, const std::string_view key, int level);
void _refresh(ConfigValues& values, const Option& opt);
void _show_config(const ConfigValues& values,
std::ostream *out, ceph::Formatter *f) const;
int _get_val_from_conf_file(const std::vector<std::string> §ions,
const std::string_view key, std::string &out) const;
int parse_option(ConfigValues& values,
const ConfigTracker& tracker,
std::vector<const char*>& args,
std::vector<const char*>::iterator& i,
std::ostream *oss,
int level);
int parse_injectargs(ConfigValues& values,
const ConfigTracker& tracker,
std::vector<const char*>& args,
std::ostream *oss);
// @returns negative number for an error, otherwise a
// @c ConfigValues::set_value_result_t is returned.
int _set_val(
ConfigValues& values,
const ConfigTracker& tracker,
const std::string &val,
const Option &opt,
int level, // CONF_*
std::string *error_message);
template <typename T>
void assign_member(member_ptr_t ptr, const Option::value_t &val);
void update_legacy_val(ConfigValues& values,
const Option &opt,
member_ptr_t member);
Option::value_t _expand_meta(
const ConfigValues& values,
const Option::value_t& in,
const Option *o,
expand_stack_t *stack,
std::ostream *err) const;
public: // for global_init
void early_expand_meta(const ConfigValues& values,
std::string &val,
std::ostream *oss) const;
// for those want to reexpand special meta, e.g, $pid
bool finalize_reexpand_meta(ConfigValues& values,
const ConfigTracker& tracker);
std::list<std::string> get_conffile_paths(const ConfigValues& values,
const char *conf_files,
std::ostream *warnings,
int flags) const;
const std::string& get_conf_path() const {
return conf_path;
}
private:
static std::string get_cluster_name(const char* conffile_path);
// The configuration file we read, or NULL if we haven't read one.
ConfFile cf;
std::string conf_path;
public:
std::string parse_error;
private:
// This will be set to true when it is safe to start threads.
// Once it is true, it will never change.
bool safe_to_start_threads = false;
bool do_show_config = false;
std::string do_show_config_value;
std::vector<Option> subsys_options;
public:
std::string data_dir_option; ///< data_dir config option, if any
public:
unsigned get_osd_pool_default_min_size(const ConfigValues& values,
uint8_t size) const {
uint8_t min_size = get_val<uint64_t>(values, "osd_pool_default_min_size");
return min_size ? std::min(min_size, size) : (size - size / 2);
}
friend class test_md_config_t;
};
template<typename T>
const T md_config_t::get_val(const ConfigValues& values,
const std::string_view key) const {
return std::get<T>(this->get_val_generic(values, key));
}
inline std::ostream& operator<<(std::ostream& o, const std::monostate&) {
return o << "INVALID_CONFIG_VALUE";
}
int ceph_resolve_file_search(const std::string& filename_list,
std::string& result);
#endif
| 13,123 | 33.997333 | 101 | h |
null | ceph-main/src/common/config_cacher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONFIG_CACHER_H
#define CEPH_CONFIG_CACHER_H
#include "common/config_obs.h"
#include "common/config.h"
template <typename ValueT>
class md_config_cacher_t : public md_config_obs_t {
ConfigProxy& conf;
const char* const option_name;
std::atomic<ValueT> value_cache;
const char** get_tracked_conf_keys() const override {
const static char* keys[] = { option_name, nullptr };
return keys;
}
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string>& changed) override {
if (changed.count(option_name)) {
value_cache.store(conf.get_val<ValueT>(option_name));
}
}
public:
md_config_cacher_t(ConfigProxy& conf,
const char* const option_name)
: conf(conf),
option_name(option_name) {
conf.add_observer(this);
std::atomic_init(&value_cache,
conf.get_val<ValueT>(option_name));
}
~md_config_cacher_t() {
conf.remove_observer(this);
}
operator ValueT() const {
return value_cache.load();
}
};
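// Example (illustrative sketch; "my_threshold_option" is hypothetical):
//
//   md_config_cacher_t<uint64_t> threshold{cct->_conf, "my_threshold_option"};
//   // ...
//   uint64_t t = threshold; // lock-free read of the cached value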
#endif // CEPH_CONFIG_CACHER_H
| 1,523 | 24.4 | 74 | h |
null | ceph-main/src/common/config_obs.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONFIG_OBS_H
#define CEPH_CONFIG_OBS_H
#include <set>
#include <string>
#include "common/config_fwd.h"
namespace ceph {
/** @brief Base class for configuration observers.
* Use this as a base class for your object if it has to respond to configuration changes,
* for example by updating some values or modifying its behavior.
* Subscribe for configuration changes by calling the md_config_t::add_observer() method
* and unsubscribe using md_config_t::remove_observer().
*/
template<class ConfigProxy>
class md_config_obs_impl {
public:
virtual ~md_config_obs_impl() {}
/** @brief Get a table of strings specifying the configuration keys in which the object is interested.
* This is called when the object is subscribed to configuration changes with add_observer().
* The returned table should not be freed until the observer is removed with remove_observer().
* Note that it is not possible to change the set of tracked keys without re-subscribing. */
virtual const char** get_tracked_conf_keys() const = 0;
/// React to a configuration change.
virtual void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) = 0;
/// Unused for now
virtual void handle_subsys_change(const ConfigProxy& conf,
const std::set<int>& changed) { }
};
}
using md_config_obs_t = ceph::md_config_obs_impl<ConfigProxy>;
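// Example (illustrative sketch; "my_option" is a hypothetical option name):
//
//   struct MyObserver : public md_config_obs_t {
//     const char** get_tracked_conf_keys() const override {
//       static const char* keys[] = { "my_option", nullptr };
//       return keys;
//     }
//     void handle_conf_change(const ConfigProxy& conf,
//                             const std::set<std::string>& changed) override {
//       if (changed.count("my_option")) {
//         // re-read the value via conf.get_val<>() and react to it
//       }
//     }
//   };
//
//   MyObserver obs;
//   cct->_conf.add_observer(&obs);   // and remove_observer(&obs) on teardown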
#endif
| 1,819 | 34.686275 | 104 | h |
null | ceph-main/src/common/config_obs_mgr.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#pragma once
#include <map>
#include <set>
#include <string>
#include "common/config_tracker.h"
class ConfigValues;
// @c ObserverMgr manages a set of config observers which are interested in
// the changes of settings at runtime.
template<class ConfigObs>
class ObserverMgr : public ConfigTracker {
// Maps configuration options to the observer listening for them.
using obs_map_t = std::multimap<std::string, ConfigObs*>;
obs_map_t observers;
public:
typedef std::map<ConfigObs*, std::set<std::string>> rev_obs_map;
typedef std::function<void(ConfigObs*, const std::string&)> config_gather_cb;
// Adds a new observer to this configuration. You can do this at any time,
// but it will only receive notifications for the changes that happen after
// you attach it, obviously.
//
// Most developers will probably attach their observers after global_init,
// but before anyone can call injectargs.
//
// The caller is responsible for allocating observers.
void add_observer(ConfigObs* observer);
// Remove an observer from this configuration.
// This doesn't delete the observer! If you allocated it with new(),
// you need to delete it yourself.
// This function will assert if you try to delete an observer that isn't
// there.
void remove_observer(ConfigObs* observer);
// invoke callback for every observers tracking keys
void for_each_observer(config_gather_cb callback);
// invoke callback for observers keys tracking the provided change set
template<class ConfigProxyT>
void for_each_change(const std::set<std::string>& changes,
ConfigProxyT& proxy,
config_gather_cb callback, std::ostream *oss);
bool is_tracking(const std::string& name) const override;
};
// we could put the implementations in a .cc file, and only instantiate the
// used template specializations explicitly, but that forces us to involve
// unused headers and libraries at compile-time. for instance, for instantiate,
// to instantiate ObserverMgr for seastar, we will need to include seastar
// headers to get the necessary types in place, but that would force us to link
// the non-seastar binaries against seastar libraries. so, to avoid pulling
// in unused dependencies at the expense of increasing compiling time, we put
// the implementation in the header file.
template<class ConfigObs>
void ObserverMgr<ConfigObs>::add_observer(ConfigObs* observer)
{
const char **keys = observer->get_tracked_conf_keys();
for (const char ** k = keys; *k; ++k) {
observers.emplace(*k, observer);
}
}
template<class ConfigObs>
void ObserverMgr<ConfigObs>::remove_observer(ConfigObs* observer)
{
[[maybe_unused]] bool found_obs = false;
for (auto o = observers.begin(); o != observers.end(); ) {
if (o->second == observer) {
observers.erase(o++);
found_obs = true;
} else {
++o;
}
}
ceph_assert(found_obs);
}
template<class ConfigObs>
void ObserverMgr<ConfigObs>::for_each_observer(config_gather_cb callback)
{
for (const auto& [key, obs] : observers) {
callback(obs, key);
}
}
template<class ConfigObs>
template<class ConfigProxyT>
void ObserverMgr<ConfigObs>::for_each_change(const std::set<std::string>& changes,
ConfigProxyT& proxy,
config_gather_cb callback, std::ostream *oss)
{
// create the reverse observer mapping, mapping observers to the set of
// changed keys that they'll get.
std::string val;
for (auto& key : changes) {
auto [first, last] = observers.equal_range(key);
if ((oss) && !proxy.get_val(key, &val)) {
(*oss) << key << " = '" << val << "' ";
if (first == last) {
(*oss) << "(not observed, change may require restart) ";
}
}
for (auto r = first; r != last; ++r) {
callback(r->second, key);
}
}
}
template<class ConfigObs>
bool ObserverMgr<ConfigObs>::is_tracking(const std::string& name) const
{
return observers.count(name) > 0;
}
| 4,114 | 33.579832 | 90 | h |
null | ceph-main/src/common/config_proxy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#pragma once
#include <type_traits>
#include "common/config.h"
#include "common/config_obs.h"
#include "common/config_obs_mgr.h"
#include "common/ceph_mutex.h"
// @c ConfigProxy is a facade of multiple config related classes. It exposes
// the legacy settings with arrow operator, and the new-style config with its
// member methods.
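//
// Illustrative sketch (not part of the original header) of the two styles;
// "log_to_stderr" is just an example option name:
// ```
//   ConfigProxy& conf = cct->_conf;
//   bool legacy = conf->log_to_stderr;                    // legacy member access
//   auto current = conf.get_val<bool>("log_to_stderr");   // new-style lookup
//   conf.set_val("log_to_stderr", "false");
//   conf.apply_changes(nullptr);                          // notify observers
// ```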
namespace ceph::common {
class ConfigProxy {
/**
* The current values of all settings described by the schema
*/
ConfigValues values;
using md_config_obs_t = ceph::md_config_obs_impl<ConfigProxy>;
ObserverMgr<md_config_obs_t> obs_mgr;
md_config_t config;
/** A lock that protects the md_config_t internals. It is
* recursive, for simplicity.
   * It is best if this lock comes first in the lock hierarchy. Note that
   * observers are notified with this lock released (see call_observers()). */
mutable ceph::recursive_mutex lock =
ceph::make_recursive_mutex("ConfigProxy::lock");
class CallGate {
private:
uint32_t call_count = 0;
ceph::mutex lock;
ceph::condition_variable cond;
public:
CallGate()
: lock(ceph::make_mutex("call::gate::lock")) {
}
void enter() {
std::lock_guard<ceph::mutex> locker(lock);
++call_count;
}
void leave() {
std::lock_guard<ceph::mutex> locker(lock);
ceph_assert(call_count > 0);
if (--call_count == 0) {
cond.notify_all();
}
}
void close() {
std::unique_lock<ceph::mutex> locker(lock);
while (call_count != 0) {
cond.wait(locker);
}
}
};
void call_gate_enter(md_config_obs_t *obs) {
auto p = obs_call_gate.find(obs);
ceph_assert(p != obs_call_gate.end());
p->second->enter();
}
void call_gate_leave(md_config_obs_t *obs) {
auto p = obs_call_gate.find(obs);
ceph_assert(p != obs_call_gate.end());
p->second->leave();
}
void call_gate_close(md_config_obs_t *obs) {
auto p = obs_call_gate.find(obs);
ceph_assert(p != obs_call_gate.end());
p->second->close();
}
using rev_obs_map_t = ObserverMgr<md_config_obs_t>::rev_obs_map;
typedef std::unique_ptr<CallGate> CallGateRef;
std::map<md_config_obs_t*, CallGateRef> obs_call_gate;
void call_observers(std::unique_lock<ceph::recursive_mutex>& locker,
rev_obs_map_t& rev_obs) {
// observers are notified outside of lock
locker.unlock();
for (auto& [obs, keys] : rev_obs) {
obs->handle_conf_change(*this, keys);
}
locker.lock();
for (auto& rev_ob : rev_obs) {
call_gate_leave(rev_ob.first);
}
}
void map_observer_changes(md_config_obs_t *obs, const std::string &key,
rev_obs_map_t *rev_obs) {
ceph_assert(ceph_mutex_is_locked(lock));
auto [it, new_entry] = rev_obs->emplace(obs, std::set<std::string>{});
it->second.emplace(key);
if (new_entry) {
// this needs to be done under lock as once this lock is
// dropped (before calling observers) a remove_observer()
// can sneak in and cause havoc.
call_gate_enter(obs);
}
}
public:
explicit ConfigProxy(bool is_daemon)
: config{values, obs_mgr, is_daemon}
{}
ConfigProxy(const ConfigProxy &config_proxy)
: values(config_proxy.get_config_values()),
config{values, obs_mgr, config_proxy.config.is_daemon}
{}
const ConfigValues* operator->() const noexcept {
return &values;
}
ConfigValues* operator->() noexcept {
return &values;
}
ConfigValues get_config_values() const {
std::lock_guard l{lock};
return values;
}
void set_config_values(const ConfigValues& val) {
#ifndef WITH_SEASTAR
std::lock_guard l{lock};
#endif
values = val;
}
int get_val(const std::string_view key, char** buf, int len) const {
std::lock_guard l{lock};
return config.get_val(values, key, buf, len);
}
int get_val(const std::string_view key, std::string *val) const {
std::lock_guard l{lock};
return config.get_val(values, key, val);
}
template<typename T>
const T get_val(const std::string_view key) const {
std::lock_guard l{lock};
return config.template get_val<T>(values, key);
}
template<typename T, typename Callback, typename...Args>
auto with_val(const std::string_view key, Callback&& cb, Args&&... args) const {
std::lock_guard l{lock};
return config.template with_val<T>(values, key,
std::forward<Callback>(cb),
std::forward<Args>(args)...);
}
void config_options(ceph::Formatter *f) const {
config.config_options(f);
}
const decltype(md_config_t::schema)& get_schema() const {
return config.schema;
}
const Option* get_schema(const std::string_view key) const {
auto found = config.schema.find(key);
if (found == config.schema.end()) {
return nullptr;
} else {
return &found->second;
}
}
const Option *find_option(const std::string& name) const {
return config.find_option(name);
}
void diff(ceph::Formatter *f, const std::string& name = {}) const {
std::lock_guard l{lock};
return config.diff(values, f, name);
}
std::vector<std::string> get_my_sections() const {
std::lock_guard l{lock};
return config.get_my_sections(values);
}
int get_all_sections(std::vector<std::string>& sections) const {
std::lock_guard l{lock};
return config.get_all_sections(sections);
}
int get_val_from_conf_file(const std::vector<std::string>& sections,
const std::string_view key, std::string& out,
bool emeta) const {
std::lock_guard l{lock};
return config.get_val_from_conf_file(values,
sections, key, out, emeta);
}
unsigned get_osd_pool_default_min_size(uint8_t size) const {
return config.get_osd_pool_default_min_size(values, size);
}
void early_expand_meta(std::string &val,
std::ostream *oss) const {
std::lock_guard l{lock};
return config.early_expand_meta(values, val, oss);
}
  // for those who want to re-expand special meta variables, e.g., $pid
void finalize_reexpand_meta() {
std::unique_lock locker(lock);
rev_obs_map_t rev_obs;
if (config.finalize_reexpand_meta(values, obs_mgr)) {
_gather_changes(values.changed, &rev_obs, nullptr);
}
call_observers(locker, rev_obs);
}
void add_observer(md_config_obs_t* obs) {
std::lock_guard l(lock);
obs_mgr.add_observer(obs);
obs_call_gate.emplace(obs, std::make_unique<CallGate>());
}
void remove_observer(md_config_obs_t* obs) {
std::lock_guard l(lock);
call_gate_close(obs);
obs_call_gate.erase(obs);
obs_mgr.remove_observer(obs);
}
void call_all_observers() {
std::unique_lock locker(lock);
rev_obs_map_t rev_obs;
obs_mgr.for_each_observer(
[this, &rev_obs](md_config_obs_t *obs, const std::string &key) {
map_observer_changes(obs, key, &rev_obs);
});
call_observers(locker, rev_obs);
}
void set_safe_to_start_threads() {
config.set_safe_to_start_threads();
}
void _clear_safe_to_start_threads() {
config._clear_safe_to_start_threads();
}
void show_config(std::ostream& out) {
std::lock_guard l{lock};
config.show_config(values, out);
}
void show_config(ceph::Formatter *f) {
std::lock_guard l{lock};
config.show_config(values, f);
}
void config_options(ceph::Formatter *f) {
std::lock_guard l{lock};
config.config_options(f);
}
int rm_val(const std::string_view key) {
std::lock_guard l{lock};
return config.rm_val(values, key);
}
// Expand all metavariables. Make any pending observer callbacks.
void apply_changes(std::ostream* oss) {
std::unique_lock locker(lock);
rev_obs_map_t rev_obs;
    // defer applying changes until the cluster name has been assigned
if (!values.cluster.empty()) {
// meta expands could have modified anything. Copy it all out again.
_gather_changes(values.changed, &rev_obs, oss);
}
call_observers(locker, rev_obs);
}
void _gather_changes(std::set<std::string> &changes,
rev_obs_map_t *rev_obs, std::ostream* oss) {
obs_mgr.for_each_change(
changes, *this,
[this, rev_obs](md_config_obs_t *obs, const std::string &key) {
map_observer_changes(obs, key, rev_obs);
}, oss);
changes.clear();
}
int set_val(const std::string_view key, const std::string& s,
std::stringstream* err_ss=nullptr) {
std::lock_guard l{lock};
return config.set_val(values, obs_mgr, key, s, err_ss);
}
void set_val_default(const std::string_view key, const std::string& val) {
std::lock_guard l{lock};
config.set_val_default(values, obs_mgr, key, val);
}
void set_val_or_die(const std::string_view key, const std::string& val) {
std::lock_guard l{lock};
config.set_val_or_die(values, obs_mgr, key, val);
}
int set_mon_vals(CephContext *cct,
const std::map<std::string,std::string,std::less<>>& kv,
md_config_t::config_callback config_cb) {
std::unique_lock locker(lock);
int ret = config.set_mon_vals(cct, values, obs_mgr, kv, config_cb);
rev_obs_map_t rev_obs;
_gather_changes(values.changed, &rev_obs, nullptr);
call_observers(locker, rev_obs);
return ret;
}
int injectargs(const std::string &s, std::ostream *oss) {
std::unique_lock locker(lock);
int ret = config.injectargs(values, obs_mgr, s, oss);
rev_obs_map_t rev_obs;
_gather_changes(values.changed, &rev_obs, oss);
call_observers(locker, rev_obs);
return ret;
}
void parse_env(unsigned entity_type,
const char *env_var = "CEPH_ARGS") {
std::lock_guard l{lock};
config.parse_env(entity_type, values, obs_mgr, env_var);
}
int parse_argv(std::vector<const char*>& args, int level=CONF_CMDLINE) {
std::lock_guard l{lock};
return config.parse_argv(values, obs_mgr, args, level);
}
int parse_config_files(const char *conf_files,
std::ostream *warnings, int flags) {
std::lock_guard l{lock};
return config.parse_config_files(values, obs_mgr,
conf_files, warnings, flags);
}
bool has_parse_error() const {
return !config.parse_error.empty();
}
std::string get_parse_error() {
return config.parse_error;
}
void complain_about_parse_error(CephContext *cct) {
return config.complain_about_parse_error(cct);
}
void do_argv_commands() const {
std::lock_guard l{lock};
config.do_argv_commands(values);
}
void get_config_bl(uint64_t have_version,
ceph::buffer::list *bl,
uint64_t *got_version) {
std::lock_guard l{lock};
config.get_config_bl(values, have_version, bl, got_version);
}
void get_defaults_bl(ceph::buffer::list *bl) {
std::lock_guard l{lock};
config.get_defaults_bl(values, bl);
}
const std::string& get_conf_path() const {
return config.get_conf_path();
}
std::optional<std::string> get_val_default(std::string_view key) {
return config.get_val_default(key);
}
};
}
| 11,013 | 30.201133 | 82 | h |
null | ceph-main/src/common/config_tracker.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#pragma once
#include <string>
// @ConfigTracker is queried to see if any added observers is tracking one or
// more changed settings.
//
// this class is introduced in hope to decouple @c md_config_t from any instantiated
// class of @c ObserverMgr, as what the former wants is but @c is_tracking(), and to
// make ObserverMgr a template parameter of md_config_t's methods just complicates
// the dependencies between header files, and slows down the compiling.
class ConfigTracker {
public:
virtual ~ConfigTracker() = default;
virtual bool is_tracking(const std::string& name) const = 0;
};
| 671 | 34.368421 | 84 | h |
null | ceph-main/src/common/config_values.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#pragma once
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <utility>
#include "common/entity_name.h"
#include "common/options.h"
#include "log/SubsystemMap.h"
#include "msg/msg_types.h"
// @c ConfigValues keeps track of mappings from the config names to their values,
// debug logging settings, and some other "unnamed" settings, like entity name of
// the daemon.
class ConfigValues {
using values_t = std::map<std::string_view, std::map<int32_t,Option::value_t>>;
values_t values;
// for populating md_config_impl::legacy_values in ctor
friend struct md_config_t;
public:
EntityName name;
/// cluster name
std::string cluster;
ceph::logging::SubsystemMap subsys;
bool no_mon_config = false;
// Set of configuration options that have changed since the last
// apply_changes
using changed_set_t = std::set<std::string>;
changed_set_t changed;
// This macro block defines C members of the md_config_t struct
// corresponding to the definitions in legacy_config_opts.h.
// These C members are consumed by code that was written before
// the new options.cc infrastructure: all newer code should
// consume options via explicit get() rather than C members.
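// For instance (illustrative; the real entries live in legacy_config_opts.h),
// a hypothetical line OPTION(example_opt, OPT_INT) expands here to a public
//   int64_t example_opt;
// member of this class.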
#define OPTION_OPT_INT(name) int64_t name;
#define OPTION_OPT_LONGLONG(name) int64_t name;
#define OPTION_OPT_STR(name) std::string name;
#define OPTION_OPT_DOUBLE(name) double name;
#define OPTION_OPT_FLOAT(name) double name;
#define OPTION_OPT_BOOL(name) bool name;
#define OPTION_OPT_ADDR(name) entity_addr_t name;
#define OPTION_OPT_ADDRVEC(name) entity_addrvec_t name;
#define OPTION_OPT_U32(name) uint64_t name;
#define OPTION_OPT_U64(name) uint64_t name;
#define OPTION_OPT_UUID(name) uuid_d name;
#define OPTION_OPT_SIZE(name) uint64_t name;
#define OPTION(name, ty) \
public: \
OPTION_##ty(name)
#define SAFE_OPTION(name, ty) \
protected: \
OPTION_##ty(name)
#include "common/options/legacy_config_opts.h"
#undef OPTION_OPT_INT
#undef OPTION_OPT_LONGLONG
#undef OPTION_OPT_STR
#undef OPTION_OPT_DOUBLE
#undef OPTION_OPT_FLOAT
#undef OPTION_OPT_BOOL
#undef OPTION_OPT_ADDR
#undef OPTION_OPT_ADDRVEC
#undef OPTION_OPT_U32
#undef OPTION_OPT_U64
#undef OPTION_OPT_UUID
#undef OPTION
#undef SAFE_OPTION
public:
enum set_value_result_t {
SET_NO_CHANGE,
SET_NO_EFFECT,
SET_HAVE_EFFECT,
};
/**
   * @return a set_value_result_t indicating whether the stored value changed
   *         and, if so, whether the change affects the effective value
*/
set_value_result_t set_value(std::string_view key,
Option::value_t&& value,
int level);
int rm_val(const std::string_view key, int level);
void set_logging(int which, const char* val);
/**
* @param level the level of the setting, -1 for the one with the
* highest-priority
*/
std::pair<Option::value_t, bool> get_value(const std::string_view name,
int level) const;
template<typename Func> void for_each(Func&& func) const {
for (const auto& [name,configs] : values) {
func(name, configs);
}
}
bool contains(const std::string_view key) const;
};
| 3,254 | 31.227723 | 81 | h |
null | ceph-main/src/common/containers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
//
// Ceph - scalable distributed file system
//
// Copyright (C) 2018 Red Hat, Inc.
//
// This is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1, as published by the Free Software
// Foundation. See file COPYING.
//
#ifndef CEPH_COMMON_CONTAINERS_H
#define CEPH_COMMON_CONTAINERS_H
#include <cstdint>
#include <type_traits>
namespace ceph::containers {
// tiny_vector – CPU friendly, small_vector-like container for mutexes,
// atomics and other non-movable things.
//
// The purpose of the container is to store an arbitrary number of objects
// with absolutely minimal requirements regarding constructibility
// and assignability while minimizing memory indirection.
// There is no obligation for MoveConstructibility, CopyConstructibility,
// MoveAssignability, CopyAssignability or even DefaultConstructibility,
// which allows handling std::mutexes, std::atomics or any type embedding
// them.
//
// The few requirements translate into a tiny interface. The container isn't
// Copy- nor MoveConstructible. Although it does offer a random access
// iterator, insertion in the middle is not allowed. The maximum number
// of elements must be known at run-time. This shouldn't be an issue in
// the intended use case: sharding.
//
// For the special case of no internal slots (InternalCapacity eq 0),
// tiny_vector doesn't require moving any elements (changing pointers
// is enough), and thus should be MoveConstructible.
//
// Alternatives:
// 1. std::vector<boost::optional<ValueT>> initialized with the known
// size and emplace_backed(). boost::optional inside provides
// the DefaultConstructibility. Imposes extra memory indirection.
// 2. boost::container::small_vector + boost::optional always
// requires MoveConstructibility.
// 3. boost::container::static_vector feed via emplace_back().
// Good for performance but enforces upper limit on elements count.
// For sharding this means we can't handle arbitrary number of
// shards (weird configs).
// 4. std::unique_ptr<ValueT>: extra indirection together with memory
// fragmentation.
template<typename Value, std::size_t InternalCapacity = 0>
class tiny_vector {
// NOTE: to avoid false sharing consider aligning to cache line
using storage_unit_t = \
std::aligned_storage_t<sizeof(Value), alignof(Value)>;
std::size_t _size = 0;
storage_unit_t* const data = nullptr;
storage_unit_t internal[InternalCapacity];
public:
typedef std::size_t size_type;
typedef std::add_lvalue_reference_t<Value> reference;
typedef std::add_const_t<reference> const_reference;
typedef std::add_pointer_t<Value> pointer;
// emplacer is the piece of weirdness that comes from handling
// unmovable-and-uncopyable things. The only way to instantiate
// such types I know is to create instances in-place perfectly
// forwarding necessary data to constructor.
// Abstracting that is the exact purpose of emplacer.
//
// The usage scenario is:
// 1. The tiny_vector's ctor is provided with a) maximum number
// of instances and b) a callable taking emplacer.
// 2. The callable can (but isn't obliged to!) use emplacer to
// construct an instance without knowing at which address
// in memory it will be put. Callable is also supplied with
  //    a unique integer from the range <0, maximum number of
// instances).
// 3. If callable decides to instantiate, it calls ::emplace
// of emplacer passing all arguments required by the type
// hold in tiny_vector.
//
// Example:
// ```
// static constexpr const num_internally_allocated_slots = 32;
// tiny_vector<T, num_internally_allocated_slots> mytinyvec {
// num_of_instances,
// [](const size_t i, auto emplacer) {
// emplacer.emplace(argument_for_T_ctor);
// }
// }
// ```
//
// For the sake of supporting the ceph::make_mutex() family of
// factories, which relies on C++17's guaranteed copy elision,
// the emplacer provides `data()` to retrieve the location for
// constructing the instance with placement-new. This is handy
// as the `emplace()` depends on perfect forwarding, and thus
  // interferes with the elision for cases like:
// ```
// emplacer.emplace(ceph::make_mutex("mtx-name"));
// ```
// See: https://stackoverflow.com/a/52498826
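  //
  // An illustrative sketch (not in the original comment) of the
  // elision-friendly path, constructing directly at data():
  // ```
  //   new (emplacer.data()) ceph::mutex(ceph::make_mutex("mtx-name"));
  // ```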
class emplacer {
friend class tiny_vector;
tiny_vector* parent;
emplacer(tiny_vector* const parent)
: parent(parent) {
}
public:
void* data() {
void* const ret = &parent->data[parent->_size++];
parent = nullptr;
return ret;
}
template<class... Args>
void emplace(Args&&... args) {
if (parent) {
new (data()) Value(std::forward<Args>(args)...);
}
}
};
template<typename F>
tiny_vector(const std::size_t count, F&& f)
: data(count <= InternalCapacity ? internal
: new storage_unit_t[count]) {
for (std::size_t i = 0; i < count; ++i) {
// caller MAY emplace up to `count` elements but it IS NOT
// obliged to do so. The emplacer guarantees that the limit
// will never be exceeded.
f(i, emplacer(this));
}
}
~tiny_vector() {
for (auto& elem : *this) {
elem.~Value();
}
const auto data_addr = reinterpret_cast<std::uintptr_t>(data);
const auto this_addr = reinterpret_cast<std::uintptr_t>(this);
if (data_addr < this_addr ||
data_addr >= this_addr + sizeof(*this)) {
delete[] data;
}
}
reference operator[](size_type pos) {
return reinterpret_cast<reference>(data[pos]);
}
const_reference operator[](size_type pos) const {
return reinterpret_cast<const_reference>(data[pos]);
}
size_type size() const {
return _size;
}
pointer begin() {
return reinterpret_cast<pointer>(&data[0]);
}
pointer end() {
return reinterpret_cast<pointer>(&data[_size]);
}
const pointer begin() const {
return reinterpret_cast<pointer>(&data[0]);
}
const pointer end() const {
return reinterpret_cast<pointer>(&data[_size]);
}
const pointer cbegin() const {
return reinterpret_cast<pointer>(&data[0]);
}
const pointer cend() const {
return reinterpret_cast<pointer>(&data[_size]);
}
};
} // namespace ceph::containers
#endif // CEPH_COMMON_CONTAINERS_H
| 6,547 | 32.408163 | 74 | h |
null | ceph-main/src/common/convenience.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <mutex>
#include <memory>
#include <optional>
#include <shared_mutex>
#include <type_traits>
#include <utility>
#include <boost/optional.hpp>
#ifndef CEPH_COMMON_CONVENIENCE_H
#define CEPH_COMMON_CONVENIENCE_H
namespace ceph {
// boost::optional is wonderful! Unfortunately it lacks a function for
// the thing you would most obviously want to do with it: apply a
// function to its contents.
// There are two obvious candidates. The first is a function that
// takes a function and an optional value and returns an optional
// value, either holding the return value of the function or holding
// nothing.
//
// I'd considered making more overloads for mutable lvalue
// references, but those are going a bit beyond likely use cases.
//
template<typename T, typename F>
auto maybe_do(const boost::optional<T>& t, F&& f) ->
boost::optional<std::result_of_t<F(const std::decay_t<T>)>>
{
if (t)
return { std::forward<F>(f)(*t) };
else
return boost::none;
}
// The other obvious function takes an optional but returns an
// ‘unwrapped’ value, either the result of evaluating the function or
// a provided alternate value.
//
template<typename T, typename F, typename U>
auto maybe_do_or(const boost::optional<T>& t, F&& f, U&& u) ->
std::result_of_t<F(const std::decay_t<T>)>
{
static_assert(std::is_convertible_v<U, std::result_of_t<F(T)>>,
"Alternate value must be convertible to function return type.");
if (t)
return std::forward<F>(f)(*t);
else
return std::forward<U>(u);
}
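// An illustrative sketch (not part of the original header):
// ```
//   boost::optional<int> port = 6789;
//   auto doubled = maybe_do(port, [](int p) { return p * 2; });     // holds 13578
//   int value = maybe_do_or(port, [](int p) { return p * 2; }, 0);  // 13578, or 0 if empty
// ```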
// Same thing but for std::optional
template<typename T, typename F>
auto maybe_do(const std::optional<T>& t, F&& f) ->
std::optional<std::result_of_t<F(const std::decay_t<T>)>>
{
if (t)
return { std::forward<F>(f)(*t) };
else
return std::nullopt;
}
// The other obvious function takes an optional but returns an
// ‘unwrapped’ value, either the result of evaluating the function or
// a provided alternate value.
//
template<typename T, typename F, typename U>
auto maybe_do_or(const std::optional<T>& t, F&& f, U&& u) ->
std::result_of_t<F(const std::decay_t<T>)>
{
static_assert(std::is_convertible_v<U, std::result_of_t<F(T)>>,
"Alternate value must be convertible to function return type.");
if (t)
return std::forward<F>(f)(*t);
else
return std::forward<U>(u);
}
namespace _convenience {
template<typename... Ts, typename F, std::size_t... Is>
inline void for_each_helper(const std::tuple<Ts...>& t, const F& f,
std::index_sequence<Is...>) {
(f(std::get<Is>(t)), ..., void());
}
template<typename... Ts, typename F, std::size_t... Is>
inline void for_each_helper(std::tuple<Ts...>& t, const F& f,
std::index_sequence<Is...>) {
(f(std::get<Is>(t)), ..., void());
}
template<typename... Ts, typename F, std::size_t... Is>
inline void for_each_helper(const std::tuple<Ts...>& t, F& f,
std::index_sequence<Is...>) {
(f(std::get<Is>(t)), ..., void());
}
template<typename... Ts, typename F, std::size_t... Is>
inline void for_each_helper(std::tuple<Ts...>& t, F& f,
std::index_sequence<Is...>) {
(f(std::get<Is>(t)), ..., void());
}
}
template<typename... Ts, typename F>
inline void for_each(const std::tuple<Ts...>& t, const F& f) {
_convenience::for_each_helper(t, f, std::index_sequence_for<Ts...>{});
}
template<typename... Ts, typename F>
inline void for_each(std::tuple<Ts...>& t, const F& f) {
_convenience::for_each_helper(t, f, std::index_sequence_for<Ts...>{});
}
template<typename... Ts, typename F>
inline void for_each(const std::tuple<Ts...>& t, F& f) {
_convenience::for_each_helper(t, f, std::index_sequence_for<Ts...>{});
}
template<typename... Ts, typename F>
inline void for_each(std::tuple<Ts...>& t, F& f) {
_convenience::for_each_helper(t, f, std::index_sequence_for<Ts...>{});
}
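// An illustrative sketch (not part of the original header):
// ```
//   std::tuple<int, std::string> t{1, "one"};
//   ceph::for_each(t, [](const auto& elem) { std::cout << elem << ' '; });  // prints "1 one "
// ```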
}
#endif // CEPH_COMMON_CONVENIENCE_H
| 4,276 | 30.448529 | 72 | h |
null | ceph-main/src/common/crc32c_aarch64.c | #include "acconfig.h"
#include "include/int_types.h"
#include "common/crc32c_aarch64.h"
#include "arch/arm.h"
#ifndef HAVE_ARMV8_CRC_CRYPTO_INTRINSICS
/* Request crc extension capabilities from the assembler */
asm(".arch_extension crc");
#ifdef HAVE_ARMV8_CRYPTO
/* Request crypto extension capabilities from the assembler */
asm(".arch_extension crypto");
#endif
#define CRC32CX(crc, value) __asm__("crc32cx %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
#define CRC32CW(crc, value) __asm__("crc32cw %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
#define CRC32CH(crc, value) __asm__("crc32ch %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
#define CRC32CB(crc, value) __asm__("crc32cb %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
#define CRC32C3X8(ITR) \
__asm__("crc32cx %w[c1], %w[c1], %x[v]":[c1]"+r"(crc1):[v]"r"(*((const uint64_t *)buffer + 42*1 + (ITR))));\
__asm__("crc32cx %w[c2], %w[c2], %x[v]":[c2]"+r"(crc2):[v]"r"(*((const uint64_t *)buffer + 42*2 + (ITR))));\
__asm__("crc32cx %w[c0], %w[c0], %x[v]":[c0]"+r"(crc0):[v]"r"(*((const uint64_t *)buffer + 42*0 + (ITR))));
#define CRC32C3X8_ZERO \
__asm__("crc32cx %w[c0], %w[c0], xzr":[c0]"+r"(crc0));
#else /* HAVE_ARMV8_CRC_CRYPTO_INTRINSICS */
#include <arm_acle.h>
#include <arm_neon.h>
#define CRC32CX(crc, value) (crc) = __crc32cd((crc), (value))
#define CRC32CW(crc, value) (crc) = __crc32cw((crc), (value))
#define CRC32CH(crc, value) (crc) = __crc32ch((crc), (value))
#define CRC32CB(crc, value) (crc) = __crc32cb((crc), (value))
#define CRC32C3X8(ITR) \
crc1 = __crc32cd(crc1, *((const uint64_t *)buffer + 42*1 + (ITR)));\
crc2 = __crc32cd(crc2, *((const uint64_t *)buffer + 42*2 + (ITR)));\
crc0 = __crc32cd(crc0, *((const uint64_t *)buffer + 42*0 + (ITR)));
#define CRC32C3X8_ZERO \
crc0 = __crc32cd(crc0, (const uint64_t)0);
#endif /* HAVE_ARMV8_CRC_CRYPTO_INTRINSICS */
#define CRC32C7X3X8(ITR) do {\
CRC32C3X8((ITR)*7+0) \
CRC32C3X8((ITR)*7+1) \
CRC32C3X8((ITR)*7+2) \
CRC32C3X8((ITR)*7+3) \
CRC32C3X8((ITR)*7+4) \
CRC32C3X8((ITR)*7+5) \
CRC32C3X8((ITR)*7+6) \
} while(0)
#define CRC32C7X3X8_ZERO do {\
CRC32C3X8_ZERO \
CRC32C3X8_ZERO \
CRC32C3X8_ZERO \
CRC32C3X8_ZERO \
CRC32C3X8_ZERO \
CRC32C3X8_ZERO \
CRC32C3X8_ZERO \
} while(0)
#define PREF4X64L1(PREF_OFFSET, ITR) \
__asm__("PRFM PLDL1KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 0)*64));\
__asm__("PRFM PLDL1KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 1)*64));\
__asm__("PRFM PLDL1KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 2)*64));\
__asm__("PRFM PLDL1KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 3)*64));
#define PREF1KL1(PREF_OFFSET) \
PREF4X64L1((PREF_OFFSET), 0) \
PREF4X64L1((PREF_OFFSET), 4) \
PREF4X64L1((PREF_OFFSET), 8) \
PREF4X64L1((PREF_OFFSET), 12)
#define PREF4X64L2(PREF_OFFSET, ITR) \
__asm__("PRFM PLDL2KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 0)*64));\
__asm__("PRFM PLDL2KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 1)*64));\
__asm__("PRFM PLDL2KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 2)*64));\
__asm__("PRFM PLDL2KEEP, [%x[v],%[c]]"::[v]"r"(buffer), [c]"I"((PREF_OFFSET) + ((ITR) + 3)*64));
#define PREF1KL2(PREF_OFFSET) \
PREF4X64L2((PREF_OFFSET), 0) \
PREF4X64L2((PREF_OFFSET), 4) \
PREF4X64L2((PREF_OFFSET), 8) \
PREF4X64L2((PREF_OFFSET), 12)
uint32_t ceph_crc32c_aarch64(uint32_t crc, unsigned char const *buffer, unsigned len)
{
int64_t length = len;
uint32_t crc0, crc1, crc2;
if (buffer) {
#ifdef HAVE_ARMV8_CRYPTO
if (ceph_arch_aarch64_pmull) {
#ifdef HAVE_ARMV8_CRC_CRYPTO_INTRINSICS
/* Calculate reflected crc with PMULL Instruction */
const poly64_t k1 = 0xe417f38a, k2 = 0x8f158014;
uint64_t t0, t1;
/* crc done "by 3" for fixed input block size of 1024 bytes */
while ((length -= 1024) >= 0) {
/* Prefetch data for following block to avoid cache miss */
PREF1KL2(1024*3);
/* Do first 8 bytes here for better pipelining */
crc0 = __crc32cd(crc, *(const uint64_t *)buffer);
crc1 = 0;
crc2 = 0;
buffer += sizeof(uint64_t);
/* Process block inline
Process crc0 last to avoid dependency with above */
CRC32C7X3X8(0);
CRC32C7X3X8(1);
CRC32C7X3X8(2);
CRC32C7X3X8(3);
CRC32C7X3X8(4);
CRC32C7X3X8(5);
buffer += 42*3*sizeof(uint64_t);
/* Prefetch data for following block to avoid cache miss */
PREF1KL1(1024);
/* Merge crc0 and crc1 into crc2
crc1 multiply by K2
crc0 multiply by K1 */
t1 = (uint64_t)vmull_p64(crc1, k2);
t0 = (uint64_t)vmull_p64(crc0, k1);
crc = __crc32cd(crc2, *(const uint64_t *)buffer);
crc1 = __crc32cd(0, t1);
crc ^= crc1;
crc0 = __crc32cd(0, t0);
crc ^= crc0;
buffer += sizeof(uint64_t);
}
#else /* !HAVE_ARMV8_CRC_CRYPTO_INTRINSICS */
__asm__("mov x16, #0xf38a \n\t"
"movk x16, #0xe417, lsl 16 \n\t"
"mov v1.2d[0], x16 \n\t"
"mov x16, #0x8014 \n\t"
"movk x16, #0x8f15, lsl 16 \n\t"
"mov v0.2d[0], x16 \n\t"
:::"x16","v0","v1");
while ((length -= 1024) >= 0) {
PREF1KL2(1024*3);
__asm__("crc32cx %w[c0], %w[c], %x[v]\n\t"
:[c0]"=r"(crc0):[c]"r"(crc), [v]"r"(*(const uint64_t *)buffer):);
crc1 = 0;
crc2 = 0;
buffer += sizeof(uint64_t);
CRC32C7X3X8(0);
CRC32C7X3X8(1);
CRC32C7X3X8(2);
CRC32C7X3X8(3);
CRC32C7X3X8(4);
CRC32C7X3X8(5);
buffer += 42*3*sizeof(uint64_t);
PREF1KL1(1024);
__asm__("mov v2.2d[0], %x[c1] \n\t"
"pmull v2.1q, v2.1d, v0.1d \n\t"
"mov v3.2d[0], %x[c0] \n\t"
"pmull v3.1q, v3.1d, v1.1d \n\t"
"crc32cx %w[c], %w[c2], %x[v] \n\t"
"mov %x[c1], v2.2d[0] \n\t"
"crc32cx %w[c1], wzr, %x[c1] \n\t"
"eor %w[c], %w[c], %w[c1] \n\t"
"mov %x[c0], v3.2d[0] \n\t"
"crc32cx %w[c0], wzr, %x[c0] \n\t"
"eor %w[c], %w[c], %w[c0] \n\t"
:[c1]"+r"(crc1), [c0]"+r"(crc0), [c2]"+r"(crc2), [c]"+r"(crc)
:[v]"r"(*((const uint64_t *)buffer))
:"v0","v1","v2","v3");
buffer += sizeof(uint64_t);
}
#endif /* HAVE_ARMV8_CRC_CRYPTO_INTRINSICS */
if(!(length += 1024))
return crc;
}
#endif /* HAVE_ARMV8_CRYPTO */
while ((length -= sizeof(uint64_t)) >= 0) {
CRC32CX(crc, *(uint64_t *)buffer);
buffer += sizeof(uint64_t);
}
/* The following is more efficient than the straight loop */
if (length & sizeof(uint32_t)) {
CRC32CW(crc, *(uint32_t *)buffer);
buffer += sizeof(uint32_t);
}
if (length & sizeof(uint16_t)) {
CRC32CH(crc, *(uint16_t *)buffer);
buffer += sizeof(uint16_t);
}
if (length & sizeof(uint8_t))
CRC32CB(crc, *buffer);
} else {
#ifdef HAVE_ARMV8_CRYPTO
if (ceph_arch_aarch64_pmull) {
#ifdef HAVE_ARMV8_CRC_CRYPTO_INTRINSICS
const poly64_t k1 = 0xe417f38a;
uint64_t t0;
while ((length -= 1024) >= 0) {
crc0 = __crc32cd(crc, 0);
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
/* Merge crc0 into crc: crc0 multiply by K1 */
t0 = (uint64_t)vmull_p64(crc0, k1);
crc = __crc32cd(0, t0);
}
#else /* !HAVE_ARMV8_CRC_CRYPTO_INTRINSICS */
__asm__("mov x16, #0xf38a \n\t"
"movk x16, #0xe417, lsl 16 \n\t"
"mov v1.2d[0], x16 \n\t"
:::"x16","v1");
while ((length -= 1024) >= 0) {
__asm__("crc32cx %w[c0], %w[c], xzr\n\t"
:[c0]"=r"(crc0):[c]"r"(crc));
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
CRC32C7X3X8_ZERO;
__asm__("mov v3.2d[0], %x[c0] \n\t"
"pmull v3.1q, v3.1d, v1.1d \n\t"
"mov %x[c0], v3.2d[0] \n\t"
"crc32cx %w[c], wzr, %x[c0] \n\t"
:[c]"=r"(crc)
:[c0]"r"(crc0)
:"v1","v3");
}
#endif /* HAVE_ARMV8_CRC_CRYPTO_INTRINSICS */
if(!(length += 1024))
return crc;
}
#endif /* HAVE_ARMV8_CRYPTO */
while ((length -= sizeof(uint64_t)) >= 0)
CRC32CX(crc, 0);
/* The following is more efficient than the straight loop */
if (length & sizeof(uint32_t))
CRC32CW(crc, 0);
if (length & sizeof(uint16_t))
CRC32CH(crc, 0);
if (length & sizeof(uint8_t))
CRC32CB(crc, 0);
}
return crc;
}
| 8,681 | 30.570909 | 109 | c |
null | ceph-main/src/common/crc32c_intel_baseline.c | /*
* Copyright 2012-2013 Intel Corporation All Rights Reserved.
* All rights reserved.
*
* http://opensource.org/licenses/BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "include/int_types.h"
#define MAX_ITER 8
unsigned long crc32_table_iscsi_base[256] = {
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
};
// iSCSI CRC baseline function
uint32_t ceph_crc32c_intel_baseline(uint32_t crc_init2, unsigned char const *buffer, unsigned len)
{
unsigned int crc_init = crc_init2;
unsigned int crc;
unsigned char* p_buf;
if (buffer) {
p_buf = (unsigned char*)buffer;
unsigned char const * p_end = buffer + len;
crc = crc_init;
while (p_buf < (unsigned char *) p_end ){
crc = (crc >> 8) ^ crc32_table_iscsi_base[(crc & 0x000000FF) ^ *p_buf++];
}
} else {
crc = crc_init;
while (len--) {
crc = (crc >> 8) ^ crc32_table_iscsi_base[(crc & 0x000000FF)];
}
}
return crc;
}
| 5,556 | 40.470149 | 98 | c |
null | ceph-main/src/common/crc32c_intel_fast.c | #include "acconfig.h"
#include "common/crc32c_intel_baseline.h"
extern unsigned int crc32_iscsi_00(unsigned char const *buffer, uint64_t len, uint64_t crc) asm("crc32_iscsi_00");
extern unsigned int crc32_iscsi_zero_00(unsigned char const *buffer, uint64_t len, uint64_t crc) asm("crc32_iscsi_zero_00");
#ifdef HAVE_NASM_X64
uint32_t ceph_crc32c_intel_fast(uint32_t crc, unsigned char const *buffer, unsigned len)
{
uint32_t v;
unsigned left;
if (!buffer)
{
return crc32_iscsi_zero_00(buffer, len, crc);
}
/*
* the crc32_iscsi_00 method reads past buffer+len (because it
* reads full words) which makes valgrind unhappy. don't do
* that.
*/
if (len < 16)
return ceph_crc32c_intel_baseline(crc, buffer, len);
left = ((unsigned long)buffer + len) & 7;
len -= left;
v = crc32_iscsi_00(buffer, len, crc);
if (left)
v = ceph_crc32c_intel_baseline(v, buffer + len, left);
return v;
}
int ceph_crc32c_intel_fast_exists(void)
{
return 1;
}
#else
int ceph_crc32c_intel_fast_exists(void)
{
return 0;
}
uint32_t ceph_crc32c_intel_fast(uint32_t crc, unsigned char const *buffer, unsigned len)
{
return 0;
}
#endif
| 1,143 | 21 | 124 | c |
null | ceph-main/src/common/crc32c_ppc.c | /* Copyright (C) 2017 International Business Machines Corp.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define CRC_TABLE
#define FAST_ZERO_TABLE
#include "acconfig.h"
#include "include/int_types.h"
#include "crc32c_ppc_constants.h"
#include "reverse.h"
#include <stdlib.h>
#include <strings.h>
#define VMX_ALIGN 16
#define VMX_ALIGN_MASK (VMX_ALIGN-1)
#ifdef HAVE_PPC64LE
#ifdef REFLECT
static unsigned int crc32_align(unsigned int crc, unsigned char const *p,
unsigned long len)
{
while (len--)
crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
return crc;
}
#else
static unsigned int crc32_align(unsigned int crc, unsigned char const *p,
unsigned long len)
{
while (len--)
crc = crc_table[((crc >> 24) ^ *p++) & 0xff] ^ (crc << 8);
return crc;
}
#endif
static inline unsigned long polynomial_multiply(unsigned int a, unsigned int b) {
vector unsigned int va = {a, 0, 0, 0};
vector unsigned int vb = {b, 0, 0, 0};
vector unsigned long vt;
__asm__("vpmsumw %0,%1,%2" : "=v"(vt) : "v"(va), "v"(vb));
return vt[0];
}
unsigned int barrett_reduction(unsigned long val);
static inline unsigned int gf_multiply(unsigned int a, unsigned int b) {
return barrett_reduction(polynomial_multiply(a, b));
}
unsigned int append_zeros(unsigned int crc, unsigned long length) {
unsigned long i = 0;
while (length) {
if (length & 1) {
crc = gf_multiply(crc, crc_zero[i]);
}
i++;
length /= 2;
}
return crc;
}
unsigned int __crc32_vpmsum(unsigned int crc, unsigned char const *p,
unsigned long len);
static uint32_t crc32_vpmsum(uint32_t crc, unsigned char const *data,
unsigned len)
{
unsigned int prealign;
unsigned int tail;
#ifdef CRC_XOR
crc ^= 0xffffffff;
#endif
if (len < VMX_ALIGN + VMX_ALIGN_MASK) {
crc = crc32_align(crc, data, (unsigned long)len);
goto out;
}
if ((unsigned long)data & VMX_ALIGN_MASK) {
prealign = VMX_ALIGN - ((unsigned long)data & VMX_ALIGN_MASK);
crc = crc32_align(crc, data, prealign);
len -= prealign;
data += prealign;
}
crc = __crc32_vpmsum(crc, data, (unsigned long)len & ~VMX_ALIGN_MASK);
tail = len & VMX_ALIGN_MASK;
if (tail) {
data += len & ~VMX_ALIGN_MASK;
crc = crc32_align(crc, data, tail);
}
out:
#ifdef CRC_XOR
crc ^= 0xffffffff;
#endif
return crc;
}
/* This wrapper function works around the fact that crc32_vpmsum
* does not gracefully handle the case where the data pointer is NULL.
*/
uint32_t ceph_crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len)
{
if (!data) {
/* Handle the NULL buffer case. */
#ifdef REFLECT
crc = reverse_bits(crc);
#endif
crc = append_zeros(crc, len);
#ifdef REFLECT
crc = reverse_bits(crc);
#endif
} else {
/* Handle the valid buffer case. */
crc = crc32_vpmsum(crc, data, (unsigned long)len);
}
return crc;
}
#else /* HAVE_PPC64LE */
/* This symbol has to exist on non-ppc architectures (and on legacy
* ppc systems using power7 or below) in order to compile properly
* there, even though it won't be called.
*/
uint32_t ceph_crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len)
{
return 0;
}
#endif /* HAVE_PPC64LE */
| 3,676 | 23.677852 | 81 | c |
null | ceph-main/src/common/crc32c_ppc.h | /* Copyright (C) 2017 International Business Machines Corp.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef CEPH_COMMON_CRC32C_PPC_H
#define CEPH_COMMON_CRC32C_PPC_H
#ifdef __cplusplus
extern "C" {
#endif
extern uint32_t ceph_crc32c_ppc(uint32_t crc, unsigned char const *buffer, unsigned len);
#ifdef __cplusplus
}
#endif
#endif
| 577 | 24.130435 | 89 | h |
null | ceph-main/src/common/debug.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DEBUG_H
#define CEPH_DEBUG_H
#include "common/dout.h"
/* Global version of the stuff in common/dout.h
*/
#define dout(v) ldout((dout_context), (v))
#define pdout(v, p) lpdout((dout_context), (v), (p))
#define dlog_p(sub, v) ldlog_p1((dout_context), (sub), (v))
#define generic_dout(v) lgeneric_dout((dout_context), (v))
#define derr lderr((dout_context))
#define generic_derr lgeneric_derr((dout_context))
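/* An illustrative sketch (not part of the original header): a .cc file
 * normally defines dout_context (and dout_subsys, used by ldout) before
 * using these macros, e.g.
 *
 *   #define dout_context g_ceph_context
 *   #define dout_subsys ceph_subsys_osd
 *
 *   dout(10) << "informational message" << dendl;
 *   derr << "error message" << dendl;
 */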
#endif
| 850 | 22.638889 | 70 | h |
null | ceph-main/src/common/deleter.h | /*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2014 Cloudius Systems, Ltd.
*/
#ifndef CEPH_COMMON_DELETER_H
#define CEPH_COMMON_DELETER_H
#include <atomic>
#include <cstdlib>
#include <new>
#include <utility>
/// \addtogroup memory-module
/// @{
/// Provides a mechanism for managing the lifetime of a buffer.
///
/// A \c deleter is an object that is used to inform the consumer
/// of some buffer (not referenced by the deleter itself) how to
/// delete the buffer. This can be by calling an arbitrary function
/// or destroying an object carried by the deleter. Examples of
/// a deleter's encapsulated actions are:
///
/// - calling \c std::free(p) on some captured pointer, p
/// - calling \c delete \c p on some captured pointer, p
/// - decrementing a reference count somewhere
///
/// A deleter performs its action from its destructor.
class deleter final {
public:
/// \cond internal
struct impl;
struct raw_object_tag {};
/// \endcond
private:
  // if bit 0 is set, this points to an object to be freed directly.
impl* _impl = nullptr;
public:
/// Constructs an empty deleter that does nothing in its destructor.
deleter() = default;
deleter(const deleter&) = delete;
/// Moves a deleter.
deleter(deleter&& x) noexcept : _impl(x._impl) { x._impl = nullptr; }
/// \cond internal
explicit deleter(impl* i) : _impl(i) {}
deleter(raw_object_tag tag, void* object)
: _impl(from_raw_object(object)) {}
/// \endcond
/// Destroys the deleter and carries out the encapsulated action.
~deleter();
deleter& operator=(deleter&& x);
deleter& operator=(deleter&) = delete;
/// Performs a sharing operation. The encapsulated action will only
/// be carried out after both the original deleter and the returned
  /// deleter are destroyed.
///
/// \return a deleter with the same encapsulated action as this one.
deleter share();
/// Checks whether the deleter has an associated action.
explicit operator bool() const { return bool(_impl); }
/// \cond internal
void reset(impl* i) {
this->~deleter();
new (this) deleter(i);
}
/// \endcond
/// Appends another deleter to this deleter. When this deleter is
/// destroyed, both encapsulated actions will be carried out.
void append(deleter d);
private:
static bool is_raw_object(impl* i) {
auto x = reinterpret_cast<uintptr_t>(i);
return x & 1;
}
bool is_raw_object() const {
return is_raw_object(_impl);
}
static void* to_raw_object(impl* i) {
auto x = reinterpret_cast<uintptr_t>(i);
return reinterpret_cast<void*>(x & ~uintptr_t(1));
}
void* to_raw_object() const {
return to_raw_object(_impl);
}
impl* from_raw_object(void* object) {
auto x = reinterpret_cast<uintptr_t>(object);
return reinterpret_cast<impl*>(x | 1);
}
};
/// \cond internal
struct deleter::impl {
std::atomic_uint refs;
deleter next;
impl(deleter next) : refs(1), next(std::move(next)) {}
virtual ~impl() {}
};
/// \endcond
inline deleter::~deleter() {
if (is_raw_object()) {
std::free(to_raw_object());
return;
}
if (_impl && --_impl->refs == 0) {
delete _impl;
}
}
inline deleter& deleter::operator=(deleter&& x) {
if (this != &x) {
this->~deleter();
new (this) deleter(std::move(x));
}
return *this;
}
/// \cond internal
template <typename Deleter>
struct lambda_deleter_impl final : deleter::impl {
Deleter del;
lambda_deleter_impl(deleter next, Deleter&& del)
: impl(std::move(next)), del(std::move(del)) {}
~lambda_deleter_impl() override { del(); }
};
template <typename Object>
struct object_deleter_impl final : deleter::impl {
Object obj;
object_deleter_impl(deleter next, Object&& obj)
: impl(std::move(next)), obj(std::move(obj)) {}
};
template <typename Object>
inline
object_deleter_impl<Object>* make_object_deleter_impl(deleter next, Object obj) {
return new object_deleter_impl<Object>(std::move(next), std::move(obj));
}
/// \endcond
/// Makes a \ref deleter that encapsulates the action of
/// destroying an object, as well as running another deleter. The input
/// object is moved to the deleter, and destroyed when the deleter is destroyed.
///
/// \param d deleter that will become part of the new deleter's encapsulated action
/// \param o object whose destructor becomes part of the new deleter's encapsulated action
/// \related deleter
template <typename Object>
deleter make_deleter(deleter next, Object o) {
return deleter(new lambda_deleter_impl<Object>(std::move(next), std::move(o)));
}
/// Makes a \ref deleter that encapsulates the action of destroying an object. The input
/// object is moved to the deleter, and destroyed when the deleter is destroyed.
///
/// \param o object whose destructor becomes the new deleter's encapsulated action
/// \related deleter
template <typename Object>
deleter make_deleter(Object o) {
return make_deleter(deleter(), std::move(o));
}
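/// An illustrative sketch (not part of the original header); make_free_deleter
/// is declared further below:
/// ```
///   char* raw = static_cast<char*>(std::malloc(4096));
///   deleter d = make_free_deleter(raw);  // frees raw when d is destroyed
///   d.append(make_deleter([] { /* extra cleanup runs too */ }));
/// ```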
/// \cond internal
struct free_deleter_impl final : deleter::impl {
void* obj;
free_deleter_impl(void* obj) : impl(deleter()), obj(obj) {}
~free_deleter_impl() override { std::free(obj); }
};
/// \endcond
inline deleter deleter::share() {
if (!_impl) {
return deleter();
}
if (is_raw_object()) {
_impl = new free_deleter_impl(to_raw_object());
}
++_impl->refs;
return deleter(_impl);
}
// Appends 'd' to the chain of deleters. Avoids allocation if possible. For
// performance reasons the current chain should be shorter and 'd' should be
// longer.
inline void deleter::append(deleter d) {
if (!d._impl) {
return;
}
impl* next_impl = _impl;
deleter* next_d = this;
while (next_impl) {
if (next_impl == d._impl)
return ;
if (is_raw_object(next_impl)) {
next_d->_impl = next_impl = new free_deleter_impl(to_raw_object(next_impl));
}
if (next_impl->refs != 1) {
next_d->_impl = next_impl = make_object_deleter_impl(std::move(next_impl->next), deleter(next_impl));
}
next_d = &next_impl->next;
next_impl = next_d->_impl;
}
next_d->_impl = d._impl;
d._impl = nullptr;
}
/// Makes a deleter that calls \c std::free() when it is destroyed.
///
/// \param obj object to free.
/// \related deleter
inline deleter make_free_deleter(void* obj) {
if (!obj) {
return deleter();
}
return deleter(deleter::raw_object_tag(), obj);
}
/// Makes a deleter that calls \c std::free() when it is destroyed, as well
/// as invoking the encapsulated action of another deleter.
///
/// \param d deleter to invoke.
/// \param obj object to free.
/// \related deleter
inline deleter make_free_deleter(deleter next, void* obj) {
return make_deleter(std::move(next), [obj] () mutable { std::free(obj); });
}
/// \see make_deleter(Object)
/// \related deleter
template <typename T>
inline deleter make_object_deleter(T&& obj) {
return deleter{make_object_deleter_impl(deleter(), std::move(obj))};
}
/// \see make_deleter(deleter, Object)
/// \related deleter
template <typename T>
inline deleter make_object_deleter(deleter d, T&& obj) {
return deleter{make_object_deleter_impl(std::move(d), std::move(obj))};
}
/// @}
#endif /* CEPH_COMMON_DELETER_H */
| 7,899 | 29.152672 | 107 | h |
null | ceph-main/src/common/dns_resolve.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DNS_RESOLVE_H
#define CEPH_DNS_RESOLVE_H
#include <netinet/in.h>
#ifndef _WIN32
#include <resolv.h>
#endif
#include "common/ceph_mutex.h"
#include "msg/msg_types.h" // for entity_addr_t
namespace ceph {
/**
* this class is used to facilitate the testing of
* resolv.h functions.
*/
class ResolvHWrapper {
public:
virtual ~ResolvHWrapper() {}
#ifdef HAVE_RES_NQUERY
virtual int res_nquery(res_state s, const char *hostname, int cls, int type,
u_char *buf, int bufsz);
virtual int res_nsearch(res_state s, const char *hostname, int cls, int type,
u_char *buf, int bufsz);
#else
virtual int res_query(const char *hostname, int cls, int type,
u_char *buf, int bufsz);
virtual int res_search(const char *hostname, int cls, int type,
u_char *buf, int bufsz);
#endif
};
/**
* @class DNSResolver
*
* This is a singleton class that exposes the functionality of DNS querying.
*/
class DNSResolver {
public:
// singleton declaration
static DNSResolver *get_instance()
{
static DNSResolver instance;
return &instance;
}
DNSResolver(DNSResolver const&) = delete;
void operator=(DNSResolver const&) = delete;
// this function is used by the unit test
static DNSResolver *get_instance(ResolvHWrapper *resolv_wrapper) {
DNSResolver *resolv = DNSResolver::get_instance();
delete resolv->resolv_h;
resolv->resolv_h = resolv_wrapper;
return resolv;
}
enum class SRV_Protocol {
TCP, UDP
};
struct Record {
uint16_t priority;
uint16_t weight;
entity_addr_t addr;
};
int resolve_cname(CephContext *cct, const std::string& hostname,
std::string *cname, bool *found);
/**
* Resolves the address given a hostname.
*
   * @param hostname the hostname to be resolved
* @param[out] addr the hostname's address
* @returns 0 on success, negative error code on failure
*/
int resolve_ip_addr(CephContext *cct, const std::string& hostname,
entity_addr_t *addr);
/**
* Returns the list of hostnames and addresses that provide a given
* service configured as DNS SRV records.
*
* @param service_name the service name
* @param trans_protocol the IP protocol used by the service (TCP or UDP)
* @param[out] srv_hosts the hostname to address map of available hosts
* providing the service. If no host exists the map is not
* changed.
* @returns 0 on success, negative error code on failure
*/
int resolve_srv_hosts(CephContext *cct, const std::string& service_name,
const SRV_Protocol trans_protocol, std::map<std::string, Record> *srv_hosts);
/**
* Returns the list of hostnames and addresses that provide a given
* service configured as DNS SRV records.
*
* @param service_name the service name
* @param trans_protocol the IP protocol used by the service (TCP or UDP)
* @param domain the domain of the service
* @param[out] srv_hosts the hostname to address map of available hosts
* providing the service. If no host exists the map is not
* changed.
* @returns 0 on success, negative error code on failure
*/
int resolve_srv_hosts(CephContext *cct, const std::string& service_name,
const SRV_Protocol trans_protocol, const std::string& domain,
std::map<std::string, Record> *srv_hosts);
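  /* An illustrative sketch (not part of the original header); the service
   * name and domain below are placeholders:
   *
   *   std::map<std::string, DNSResolver::Record> hosts;
   *   int r = DNSResolver::get_instance()->resolve_srv_hosts(
   *       cct, "ceph-mon", DNSResolver::SRV_Protocol::TCP, "example.com", &hosts);
   */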
private:
DNSResolver() { resolv_h = new ResolvHWrapper(); }
~DNSResolver();
ceph::mutex lock = ceph::make_mutex("DNSResolver::lock");
ResolvHWrapper *resolv_h;
#ifdef HAVE_RES_NQUERY
std::list<res_state> states;
int get_state(CephContext *cct, res_state *ps);
void put_state(res_state s);
#endif
#ifndef _WIN32
  /* this private function allows reusing the res_state structure used
   * by other functions of this class
*/
int resolve_ip_addr(CephContext *cct, res_state *res,
const std::string& hostname, entity_addr_t *addr);
#endif
std::string srv_protocol_to_str(SRV_Protocol proto) {
switch (proto) {
case SRV_Protocol::TCP:
return "tcp";
case SRV_Protocol::UDP:
return "udp";
}
return "";
}
};
}
#endif
| 4,743 | 27.238095 | 85 | h |
null | ceph-main/src/common/dout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2010 Sage Weil <[email protected]>
* Copyright (C) 2010 Dreamhost
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DOUT_H
#define CEPH_DOUT_H
#include <type_traits>
#include "include/ceph_assert.h"
#include "include/common_fwd.h"
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#include <seastar/util/log.hh>
#include "crimson/common/log.h"
#include "crimson/common/config_proxy.h"
#else
#include "global/global_context.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/likely.h"
#include "common/Clock.h"
#include "log/Log.h"
#endif
extern void dout_emergency(const char * const str);
extern void dout_emergency(const std::string &str);
// intentionally conflict with endl
class _bad_endl_use_dendl_t { public: _bad_endl_use_dendl_t(int) {} };
static const _bad_endl_use_dendl_t endl = 0;
inline std::ostream& operator<<(std::ostream& out, _bad_endl_use_dendl_t) {
ceph_abort_msg("you are using the wrong endl.. use std::endl or dendl");
return out;
}
class DoutPrefixProvider {
public:
virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
virtual CephContext *get_cct() const = 0;
virtual unsigned get_subsys() const = 0;
virtual ~DoutPrefixProvider() {}
};
inline std::ostream &operator<<(
std::ostream &lhs, const DoutPrefixProvider &dpp) {
return dpp.gen_prefix(lhs);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<DoutPrefixProvider> : fmt::ostream_formatter {};
#endif
// a prefix provider with empty prefix
class NoDoutPrefix : public DoutPrefixProvider {
CephContext *const cct;
const unsigned subsys;
public:
NoDoutPrefix(CephContext *cct, unsigned subsys) : cct(cct), subsys(subsys) {}
std::ostream& gen_prefix(std::ostream& out) const override { return out; }
CephContext *get_cct() const override { return cct; }
unsigned get_subsys() const override { return subsys; }
};
// a prefix provider with static (const char*) prefix
class DoutPrefix : public NoDoutPrefix {
const char *const prefix;
public:
DoutPrefix(CephContext *cct, unsigned subsys, const char *prefix)
: NoDoutPrefix(cct, subsys), prefix(prefix) {}
std::ostream& gen_prefix(std::ostream& out) const override {
return out << prefix;
}
};
// a prefix provider that composes itself on top of another
class DoutPrefixPipe : public DoutPrefixProvider {
const DoutPrefixProvider& dpp;
public:
DoutPrefixPipe(const DoutPrefixProvider& dpp) : dpp(dpp) {}
std::ostream& gen_prefix(std::ostream& out) const override final {
dpp.gen_prefix(out);
add_prefix(out);
return out;
}
CephContext *get_cct() const override { return dpp.get_cct(); }
unsigned get_subsys() const override { return dpp.get_subsys(); }
virtual void add_prefix(std::ostream& out) const = 0;
};
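// For example, a caller can compose a per-request prefix on top of an
// existing provider by subclassing DoutPrefixPipe (an illustrative sketch;
// "ReqPrefix" and "req_id" are not defined anywhere in this header):
//
//   struct ReqPrefix : DoutPrefixPipe {
//     uint64_t req_id;
//     ReqPrefix(const DoutPrefixProvider& dpp, uint64_t id)
//       : DoutPrefixPipe(dpp), req_id(id) {}
//     void add_prefix(std::ostream& out) const override {
//       out << "req=" << req_id << ' ';
//     }
//   };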
// helpers
namespace ceph::dout {
template<typename T>
struct dynamic_marker_t {
T value;
// constexpr ctor isn't needed as it's an aggregate type
constexpr operator T() const { return value; }
};
template<typename T>
constexpr dynamic_marker_t<T> need_dynamic(T&& t) {
return dynamic_marker_t<T>{ std::forward<T>(t) };
}
template<typename T>
struct is_dynamic : public std::false_type {};
template<typename T>
struct is_dynamic<dynamic_marker_t<T>> : public std::true_type {};
} // ceph::dout
// generic macros
#define dout_prefix *_dout
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#define dout_impl(cct, sub, v) \
do { \
if (crimson::common::local_conf()->subsys.should_gather(sub, v)) { \
seastar::logger& _logger = crimson::get_logger(sub); \
const auto _lv = v; \
std::ostringstream _out; \
std::ostream* _dout = &_out;
#define dendl_impl \
""; \
_logger.log(crimson::to_log_level(_lv), \
"{}", _out.str().c_str()); \
} \
} while (0)
#else
#define dout_impl(cct, sub, v) \
do { \
const bool should_gather = [&](const auto cctX) { \
if constexpr (ceph::dout::is_dynamic<decltype(sub)>::value || \
ceph::dout::is_dynamic<decltype(v)>::value) { \
return cctX->_conf->subsys.should_gather(sub, v); \
} else { \
/* The parentheses are **essential** because commas in angle \
* brackets are NOT ignored on macro expansion! A language's \
* limitation, sorry. */ \
return (cctX->_conf->subsys.template should_gather<sub, v>()); \
} \
}(cct); \
\
if (should_gather) { \
ceph::logging::MutableEntry _dout_e(v, sub); \
static_assert(std::is_convertible<decltype(&*cct), \
CephContext* >::value, \
"provided cct must be compatible with CephContext*"); \
auto _dout_cct = cct; \
std::ostream* _dout = &_dout_e.get_ostream();
#define dendl_impl std::flush; \
_dout_cct->_log->submit_entry(std::move(_dout_e)); \
} \
} while (0)
#endif // WITH_SEASTAR
#define lsubdout(cct, sub, v) dout_impl(cct, ceph_subsys_##sub, v) dout_prefix
#define ldout(cct, v) dout_impl(cct, dout_subsys, v) dout_prefix
#define lderr(cct) dout_impl(cct, ceph_subsys_, -1) dout_prefix
#define ldpp_subdout(dpp, sub, v) \
if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \
dout_impl(pdpp->get_cct(), ceph_subsys_##sub, v) \
pdpp->gen_prefix(*_dout)
#define ldpp_dout(dpp, v) \
if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \
dout_impl(pdpp->get_cct(), ceph::dout::need_dynamic(pdpp->get_subsys()), v) \
pdpp->gen_prefix(*_dout)
#define lgeneric_subdout(cct, sub, v) dout_impl(cct, ceph_subsys_##sub, v) *_dout
#define lgeneric_dout(cct, v) dout_impl(cct, ceph_subsys_, v) *_dout
#define lgeneric_derr(cct) dout_impl(cct, ceph_subsys_, -1) *_dout
#define ldlog_p1(cct, sub, lvl) \
(cct->_conf->subsys.should_gather((sub), (lvl)))
#define dendl dendl_impl
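// Typical usage (an illustrative sketch; `dout_subsys`, `cct`, `dpp` and the
// values being streamed are assumed to be provided by the including .cc file):
//
//   #define dout_subsys ceph_subsys_osd    // selects the logging subsystem
//   ...
//   ldout(cct, 10) << "doing work on " << oid << dendl;   // gated at level 10
//   lderr(cct) << "failed: " << r << dendl;               // error level
//   ldpp_dout(dpp, 20) << "request accepted" << dendl;    // prefixed output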
#endif
| 6,715 | 33.091371 | 88 | h |
null | ceph-main/src/common/entity_name.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_ENTITY_NAME_H
#define CEPH_COMMON_ENTITY_NAME_H
#include <string_view>
#include <ifaddrs.h>
#include "msg/msg_types.h"
/* Represents a Ceph entity name.
*
* For example, mds.0 is the name of the first metadata server.
 * client.admin is the name of the admin client.
*/
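/* Illustrative usage (a sketch):
 *
 *   EntityName n;
 *   if (n.from_str("osd.123")) {     // parses "<type>.<id>"
 *     ceph_assert(n.is_osd() && n.get_id() == "123");
 *   }
 */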
struct EntityName
{
void encode(ceph::buffer::list& bl) const {
using ceph::encode;
encode(type, bl);
encode(id, bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
using ceph::decode;
uint32_t type_;
std::string id_;
decode(type_, bl);
decode(id_, bl);
set(type_, id_);
}
const std::string& to_str() const;
const char *to_cstr() const;
bool from_str(std::string_view s);
void set(uint32_t type_, std::string_view id_);
int set(std::string_view type_, std::string_view id_);
void set_type(uint32_t type_);
int set_type(std::string_view type);
void set_id(std::string_view id_);
void set_name(entity_name_t n);
const char* get_type_str() const;
uint32_t get_type() const { return type; }
bool is_osd() const { return get_type() == CEPH_ENTITY_TYPE_OSD; }
bool is_mgr() const { return get_type() == CEPH_ENTITY_TYPE_MGR; }
bool is_mds() const { return get_type() == CEPH_ENTITY_TYPE_MDS; }
bool is_client() const { return get_type() == CEPH_ENTITY_TYPE_CLIENT; }
bool is_mon() const { return get_type() == CEPH_ENTITY_TYPE_MON; }
std::string_view get_type_name() const;
const std::string &get_id() const;
bool has_default_id() const;
static std::string get_valid_types_as_str();
static uint32_t str_to_ceph_entity_type(std::string_view);
friend bool operator<(const EntityName& a, const EntityName& b);
friend std::ostream& operator<<(std::ostream& out, const EntityName& n);
bool operator==(const EntityName& rhs) const noexcept {
return type == rhs.type && id == rhs.id;
}
private:
struct str_to_entity_type_t {
uint32_t type;
const char *str;
};
static const std::array<str_to_entity_type_t, 6> STR_TO_ENTITY_TYPE;
uint32_t type = 0;
std::string id;
std::string type_id;
};
WRITE_CLASS_ENCODER(EntityName)
#endif
| 2,519 | 26.096774 | 74 | h |
null | ceph-main/src/common/environment.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_ENVIRONMENT_H
#define CEPH_COMMON_ENVIRONMENT_H
extern bool get_env_bool(const char *key);
extern int get_env_int(const char *key);
#endif
| 570 | 24.954545 | 70 | h |
null | ceph-main/src/common/error_code.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc. <[email protected]>
*
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License version
* 2.1, as published by the Free Software Foundation. See file
* COPYING.
*/
#ifndef COMMON_CEPH_ERROR_CODE
#define COMMON_CEPH_ERROR_CODE
#include <netdb.h>
#include <boost/system/error_code.hpp>
#include <boost/asio.hpp>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
namespace ceph {
// This is for error categories we define, so we can specify the
// equivalent integral value at the point of definition.
class converting_category : public boost::system::error_category {
public:
virtual int from_code(int code) const noexcept = 0;
};
const boost::system::error_category& ceph_category() noexcept;
enum class errc {
not_in_map = 1, // The requested item was not found in the map
does_not_exist, // Item does not exist
failure, // An internal fault or inconsistency
exists, // Already exists
limit_exceeded, // Attempting to use too much of something
  auth,              // May be an auth failure, or a failure of the
                     // preconditions needed to attempt auth.
conflict, // Conflict or precondition failure
};
}
namespace boost::system {
template<>
struct is_error_condition_enum<::ceph::errc> {
static const bool value = true;
};
template<>
struct is_error_code_enum<::ceph::errc> {
static const bool value = false;
};
}
namespace ceph {
// explicit conversion:
inline boost::system::error_code make_error_code(errc e) noexcept {
return { static_cast<int>(e), ceph_category() };
}
// implicit conversion:
inline boost::system::error_condition make_error_condition(errc e) noexcept {
return { static_cast<int>(e), ceph_category() };
}
[[nodiscard]] boost::system::error_code to_error_code(int ret) noexcept;
[[nodiscard]] int from_error_code(boost::system::error_code e) noexcept;
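// Illustrative usage (a sketch; whether a given error_code compares equal to
// a ceph::errc condition ultimately depends on ceph_category()'s equivalence
// rules, which live in the .cc implementation):
//
//   boost::system::error_code ec = make_error_code(errc::does_not_exist);
//   if (ec == errc::does_not_exist) {
//     int r = from_error_code(ec);   // convert back to an int return value
//   }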
}
#pragma GCC diagnostic pop
#pragma clang diagnostic pop
// Moved here from buffer.h so librados doesn't gain a dependency on
// Boost.System
namespace ceph::buffer {
inline namespace v15_2_0 {
const boost::system::error_category& buffer_category() noexcept;
enum class errc { bad_alloc = 1,
end_of_buffer,
malformed_input };
}
}
namespace boost::system {
template<>
struct is_error_code_enum<::ceph::buffer::errc> {
static const bool value = true;
};
template<>
struct is_error_condition_enum<::ceph::buffer::errc> {
static const bool value = false;
};
}
namespace ceph::buffer {
inline namespace v15_2_0 {
// implicit conversion:
inline boost::system::error_code make_error_code(errc e) noexcept {
return { static_cast<int>(e), buffer_category() };
}
// explicit conversion:
inline boost::system::error_condition
make_error_condition(errc e) noexcept {
return { static_cast<int>(e), buffer_category() };
}
struct error : boost::system::system_error {
using system_error::system_error;
};
struct bad_alloc : public error {
bad_alloc() : error(errc::bad_alloc) {}
bad_alloc(const char* what_arg) : error(errc::bad_alloc, what_arg) {}
bad_alloc(const std::string& what_arg) : error(errc::bad_alloc, what_arg) {}
};
struct end_of_buffer : public error {
end_of_buffer() : error(errc::end_of_buffer) {}
end_of_buffer(const char* what_arg) : error(errc::end_of_buffer, what_arg) {}
end_of_buffer(const std::string& what_arg)
: error(errc::end_of_buffer, what_arg) {}
};
struct malformed_input : public error {
malformed_input() : error(errc::malformed_input) {}
malformed_input(const char* what_arg)
: error(errc::malformed_input, what_arg) {}
malformed_input(const std::string& what_arg)
: error(errc::malformed_input, what_arg) {}
};
struct error_code : public error {
error_code(int r) : error(-r, boost::system::system_category()) {}
error_code(int r, const char* what_arg)
: error(-r, boost::system::system_category(), what_arg) {}
error_code(int r, const std::string& what_arg)
: error(-r, boost::system::system_category(), what_arg) {}
};
}
}
#endif // COMMON_CEPH_ERROR_CODE
| 4,378 | 27.809211 | 79 | h |
null | ceph-main/src/common/escape.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_RGW_ESCAPE_H
#define CEPH_RGW_ESCAPE_H
#include <ostream>
#include <string_view>
/* Returns the length of a buffer that would be needed to escape 'buf'
* as an XML attribute
*/
size_t escape_xml_attr_len(const char *buf);
/* Escapes 'buf' as an XML attribute. Assumes that 'out' is at least long
* enough to fit the output. You can find out the required length by calling
* escape_xml_attr_len first.
*/
void escape_xml_attr(const char *buf, char *out);
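// usage (an illustrative sketch; whether the returned length already accounts
// for the terminating NUL is an assumption here, see escape.cc):
//   std::string input = ...;
//   std::vector<char> out(escape_xml_attr_len(input.c_str()) + 1);
//   escape_xml_attr(input.c_str(), out.data());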
/* Returns the length of a buffer that would be needed to escape 'buf'
 * as a JSON attribute
*/
size_t escape_json_attr_len(const char *buf, size_t src_len);
/* Escapes 'buf' as a JSON attribute. Assumes that 'out' is at least long
* enough to fit the output. You can find out the required length by calling
* escape_json_attr_len first.
*/
void escape_json_attr(const char *buf, size_t src_len, char *out);
/* Note: we escape control characters. Although the XML spec doesn't actually
* require this, Amazon does it in their XML responses.
*/
// stream output operators that write escaped text without making a copy
// usage:
// std::string xml_input = ...;
// std::cout << xml_stream_escaper(xml_input) << std::endl;
struct xml_stream_escaper {
std::string_view str;
xml_stream_escaper(std::string_view str) : str(str.data(), str.size()) {}
};
std::ostream& operator<<(std::ostream& out, const xml_stream_escaper& e);
struct json_stream_escaper {
std::string_view str;
json_stream_escaper(std::string_view str) : str(str.data(), str.size()) {}
};
std::ostream& operator<<(std::ostream& out, const json_stream_escaper& e);
#endif
| 2,043 | 30.446154 | 77 | h |
null | ceph-main/src/common/fault_injector.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <type_traits>
#include <boost/type_traits/has_equal_to.hpp>
#include <boost/type_traits/has_left_shift.hpp>
#include <variant>
#include "include/ceph_assert.h"
#include "common/dout.h"
/// @file
/// A failure type that aborts the process with a failed assertion.
struct InjectAbort {};
/// A failure type that injects an error code and optionally logs a message.
struct InjectError {
/// error code to inject
int error;
/// an optional log channel to print an error message
const DoutPrefixProvider* dpp = nullptr;
};
/** @class FaultInjector
* @brief Used to instrument a code path with deterministic fault injection
* by making one or more calls to check().
*
* A default-constructed FaultInjector contains no failure. It can also be
* constructed with a failure of type InjectAbort or InjectError, along with
* a location to inject that failure.
*
* The contained failure can be overwritten with a call to inject() or clear().
* This is not thread-safe with respect to other member functions on the same
* instance.
*
* @tparam Key The location can be represented by any Key type that is
 * movable, default-constructible, equality-comparable and stream-outputable.
* A string or string_view Key may be preferable when the location comes from
* user input, or to describe the steps like "before-foo" and "after-foo".
* An integer Key may be preferable for a code path with many steps, where you
* just want to check 1, 2, 3, etc. without inventing names for each.
*/
template <typename Key>
class FaultInjector {
public:
/// Default-construct with no injected failure.
constexpr FaultInjector() noexcept : location() {}
/// Construct with an injected assertion failure at the given location.
constexpr FaultInjector(Key location, InjectAbort a)
: location(std::move(location)), failure(a) {}
/// Construct with an injected error code at the given location.
constexpr FaultInjector(Key location, InjectError e)
: location(std::move(location)), failure(e) {}
/// Inject an assertion failure at the given location.
void inject(Key location, InjectAbort a) {
this->location = std::move(location);
this->failure = a;
}
/// Inject an error at the given location.
void inject(Key location, InjectError e) {
this->location = std::move(location);
this->failure = e;
}
/// Clear any injected failure.
void clear() {
this->failure = Empty{};
}
/// Check for an injected failure at the given location. If the location
/// matches an InjectAbort failure, the process aborts here with an assertion
/// failure.
/// @returns 0 or InjectError::error if the location matches an InjectError
/// failure
[[nodiscard]] constexpr int check(const Key& location) const {
struct visitor {
const Key& check_location;
const Key& this_location;
constexpr int operator()(const std::monostate&) const {
return 0;
}
int operator()(const InjectAbort&) const {
if (check_location == this_location) {
ceph_assert_always(!"FaultInjector");
}
return 0;
}
int operator()(const InjectError& e) const {
if (check_location == this_location) {
ldpp_dout(e.dpp, -1) << "Injecting error=" << e.error
<< " at location=" << this_location << dendl;
return e.error;
}
return 0;
}
};
return std::visit(visitor{location, this->location}, failure);
}
private:
// Key requirements:
static_assert(std::is_default_constructible_v<Key>,
"Key must be default-constrible");
static_assert(std::is_move_constructible_v<Key>,
"Key must be move-constructible");
static_assert(std::is_move_assignable_v<Key>,
"Key must be move-assignable");
static_assert(boost::has_equal_to<Key, Key, bool>::value,
"Key must be equality-comparable");
static_assert(boost::has_left_shift<std::ostream, Key, std::ostream&>::value,
"Key must have an ostream operator<<");
Key location; // location of the check that should fail
using Empty = std::monostate; // empty state for std::variant
std::variant<Empty, InjectAbort, InjectError> failure;
};
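// Illustrative usage (a sketch; the "commit" key and the injected error value
// are arbitrary, and `dpp` stands for an optional DoutPrefixProvider*, which
// may be nullptr):
//
//   FaultInjector<std::string_view> fault;
//   fault.inject("commit", InjectError{-EIO, dpp});   // or InjectAbort{}
//   ...
//   if (int r = fault.check("commit"); r < 0) {
//     return r;   // -EIO is returned at the "commit" step
//   }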
| 4,680 | 33.419118 | 79 | h |
null | ceph-main/src/common/fork_function.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
// Run a function in a forked child, with a timeout.
#pragma once
#include <functional>
#include <iostream>
#include <ostream>
#include <signal.h>
#ifndef _WIN32
#include <sys/wait.h>
#endif
#include <sys/types.h>
#include "include/ceph_assert.h"
#include "common/errno.h"
#ifndef _WIN32
static void _fork_function_dummy_sighandler(int sig) {}
// Run a function post-fork, with a timeout. Function can return
// int8_t only due to unix exit code limitations. Returns -ETIMEDOUT
// if timeout is reached.
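//
// Illustrative usage (a sketch; do_risky_work() is a hypothetical helper):
//
//   int r = fork_function(60, std::cerr, []() -> int8_t {
//     return do_risky_work() ? 0 : -1;
//   });
//   if (r == -ETIMEDOUT) {
//     // the child's process group was killed after 60 seconds
//   }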
static inline int fork_function(
int timeout,
std::ostream& errstr,
std::function<int8_t(void)> f)
{
// first fork the forker.
pid_t forker_pid = fork();
if (forker_pid) {
// just wait
int status;
while (waitpid(forker_pid, &status, 0) == -1) {
ceph_assert(errno == EINTR);
}
if (WIFSIGNALED(status)) {
errstr << ": got signal: " << WTERMSIG(status) << "\n";
return 128 + WTERMSIG(status);
}
if (WIFEXITED(status)) {
int8_t r = WEXITSTATUS(status);
errstr << ": exit status: " << (int)r << "\n";
return r;
}
errstr << ": waitpid: unknown status returned\n";
return -1;
}
// we are forker (first child)
// close all fds
int maxfd = sysconf(_SC_OPEN_MAX);
if (maxfd == -1)
maxfd = 16384;
for (int fd = 0; fd <= maxfd; fd++) {
if (fd == STDIN_FILENO)
continue;
if (fd == STDOUT_FILENO)
continue;
if (fd == STDERR_FILENO)
continue;
::close(fd);
}
sigset_t mask, oldmask;
int pid;
// Restore default action for SIGTERM in case the parent process decided
// to ignore it.
if (signal(SIGTERM, SIG_DFL) == SIG_ERR) {
std::cerr << ": signal failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
// Because SIGCHLD is ignored by default, setup dummy handler for it,
// so we can mask it.
if (signal(SIGCHLD, _fork_function_dummy_sighandler) == SIG_ERR) {
std::cerr << ": signal failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
// Setup timeout handler.
if (signal(SIGALRM, timeout_sighandler) == SIG_ERR) {
std::cerr << ": signal failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
// Block interesting signals.
sigemptyset(&mask);
sigaddset(&mask, SIGINT);
sigaddset(&mask, SIGTERM);
sigaddset(&mask, SIGCHLD);
sigaddset(&mask, SIGALRM);
if (sigprocmask(SIG_SETMASK, &mask, &oldmask) == -1) {
std::cerr << ": sigprocmask failed: "
<< cpp_strerror(errno) << "\n";
goto fail_exit;
}
pid = fork();
if (pid == -1) {
std::cerr << ": fork failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
if (pid == 0) { // we are second child
// Restore old sigmask.
if (sigprocmask(SIG_SETMASK, &oldmask, NULL) == -1) {
std::cerr << ": sigprocmask failed: "
<< cpp_strerror(errno) << "\n";
goto fail_exit;
}
(void)setpgid(0, 0); // Become process group leader.
int8_t r = f();
_exit((uint8_t)r);
}
// Parent
(void)alarm(timeout);
for (;;) {
int signo;
if (sigwait(&mask, &signo) == -1) {
std::cerr << ": sigwait failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
switch (signo) {
case SIGCHLD:
int status;
if (waitpid(pid, &status, WNOHANG) == -1) {
std::cerr << ": waitpid failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
if (WIFEXITED(status))
_exit(WEXITSTATUS(status));
if (WIFSIGNALED(status))
_exit(128 + WTERMSIG(status));
std::cerr << ": unknown status returned\n";
goto fail_exit;
case SIGINT:
case SIGTERM:
// Pass SIGINT and SIGTERM, which are usually used to terminate
// a process, to the child.
if (::kill(pid, signo) == -1) {
std::cerr << ": kill failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
continue;
case SIGALRM:
std::cerr << ": timed out (" << timeout << " sec)\n";
if (::killpg(pid, SIGKILL) == -1) {
std::cerr << ": kill failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
_exit(-ETIMEDOUT);
default:
std::cerr << ": sigwait: invalid signal: " << signo << "\n";
goto fail_exit;
}
}
return 0;
fail_exit:
_exit(EXIT_FAILURE);
}
#else
static inline int fork_function(
int timeout,
std::ostream& errstr,
std::function<int8_t(void)> f)
{
errstr << "Forking is not available on Windows.\n";
return -1;
}
#endif
| 4,562 | 24.779661 | 74 | h |