[ captures ] <tparams> ( params ) specifiers exception attr -> ret requires { body }
attr: not supported yet (tested on MSVC 16.8 preview)
requires: not supported yet (tested on MSVC 16.8 preview)
struct Foo {
template<class ...Args> constexpr Foo(Args&& ...args) noexcept { print(std::forward<Args>(args)...); }
template<class T, class ...Args> constexpr void print(T&& t, Args&&... args) const noexcept {
std::cout << t;
auto coutSpaceAndArg = [](auto&& arg) { std::cout << ' ' << arg; };
(..., coutSpaceAndArg(std::forward<Args>(args))); // fold expression since C++17
}
};
int main() {
int x = 10;
auto lamb = [x]<typename ...Ts>(Ts&&... ts) mutable constexpr noexcept -> auto {
Foo(std::forward<Ts>(ts)...);
return ++x;
};
}
Code generated by the compiler
struct Foo {
template<class ... Args> inline constexpr Foo(Args &&... args) noexcept { print(std::forward<Args>(args)... );}
template<class T> inline constexpr void print(T && t) const noexcept { std::cout << t; }
template<class T, class ... Args> inline constexpr void print(T && t, Args &&... args) const noexcept {
print(std::forward<T>(t));
if constexpr(sizeof...(Args) > 0) {
std::operator<<(std::cout, ' ');
print(std::forward<Args>(args)... );
}
}
};
int main() {
int x = 10;
class __lambda_27_17 {
public:
template<typename ... Ts>
inline /*constexpr */ auto operator()(Ts &&... ts) noexcept {
Foo(std::forward<Ts>(ts)... );
return ++x;
}
private:
int x;
public:
__lambda_27_17(int & _x)
: x{_x}
{}
};
__lambda_27_17 lamb = __lambda_27_17{x};
}
template <class F>
struct y_combinator {
F f; // the lambda will be stored here
// a forwarding operator():
template <class... Args>
decltype(auto) operator()(Args&&... args) const {
// we pass ourselves to f, then the arguments.
// the lambda should take the first argument as `auto&& recurse` or similar.
return f(*this, std::forward<Args>(args)...);
}
};
// helper function that deduces the type of the lambda:
template <class F>
y_combinator<std::decay_t<F>> make_y_combinator(F&& f) {
return { std::forward<F>(f) };
}
void test() {
auto fac = make_y_combinator([](auto&& recurse, int n) -> int{
if (n <= 1) return 1;
return n * recurse(n - 1);
});
std::cout << fac(5) << std::endl;
}
struct MyStruct {
int i = 0;
std::string s;
};
MyStruct ms;
auto [u,v] = ms; // as copy
auto& [k,l] = ms; // as reference
...
std::unordered_map<std::string, int> myMap;
for(const auto& [key, val] : myMap) {
std::cout << key << ": " << val << '\n';
}
Since C++17 you can define a variable/object in a header file as inline
and if this definition is used by multiple translation units, they all refer to the same unique object
class MyClass {
static inline std::string name = ""; // OK since C++17
};
inline MyClass myGlobalObj; // OK even if included/defined by multiple CPP files
constexpr now implies inline for static data members
struct D {
static constexpr int n = 5;
//inline static constexpr int n = 5; // the same as above
};
copy elision
benefits:
- Improves performance
- Applies even to objects that are not CopyConstructible
The code below cannot compile before C++17:
std::atomic_int getValue() {
return std::atomic_int{1}; // **copy Elision (Mandatory since C++17)**
}
class Foo {
};
Foo getFoo() {
return Foo(); // **copy Elision (Mandatory since C++17)**
}
Foo getFoo() {
Foo fo;
return fo; // **NRVO: move constructor**
}
Foo getFoo(const Foo& fo) {
return fo; // **Copy constructor**
}
Foo getFoo(Foo fo) {
return fo; // **Move constructor**
}
lvalue
prvalue: pure rvalue
xvalue: expiring value
glvalue: generalized lvalue
- All names used as expressions are lvalues.
- All string literals used as expressions are lvalues.
- All other literals (4.2, true, or nullptr) are prvalues.
- All temporaries (especially objects returned by value) are prvalues.
- The result of std::move() is an xvalue.
class X {
};
X v;
const X c;
void f(const X&); // accepts an expression of any value category
void f(X&&); // accepts prvalues and xvalues only, but is a better match
f(v); // passes a modifiable lvalue to the first f()
f(c); // passes a non-modifiable lvalue to the first f()
f(X()); // passes a prvalue to the second f()
f(std::move(v)); // passes an xvalue to the second f()
C++17 then introduces a new term, called materialization
(of a temporary) for the moment a prvalue
becomes a temporary object.
Thus, a temporary materialization conversion
is a prvalue
to xvalue
conversion.
void f(const X& p); // accepts an expression of any value category but expects a glvalue
f(X()); // passes a prvalue materialized as xvalue
constexpr lambdas: only literal types, no static variables, no virtual functions, no try/catch, no new/delete
auto squared = [](auto val) { // implicitly constexpr since C++17
return val*val;
};
auto squared = [](auto val) constexpr { // explicitly constexpr since C++17
return val*val;
};
- Passing Copies of this to Lambdas
If we had captured this with [this], [=], or [&], the thread would run into undefined behavior.
class C {
private:
std::string name;
public:
void foo() {
auto var = [*this] { std::cout << name << '\n'; };
}
};
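For instance, here is a minimal sketch (the names C, startThread, and name are illustrative) of why capturing *this by copy matters when the lambda may outlive the object, e.g. when it runs on another thread:

```cpp
#include <iostream>
#include <string>
#include <thread>

class C {
private:
    std::string name{"C"};
public:
    std::thread startThread() {
        // [this], [=] or [&] would only capture a pointer/reference;
        // if the C object dies before the thread runs, that is undefined behavior.
        // [*this] copies the whole object into the lambda, so the thread is safe.
        return std::thread{[*this] { std::cout << name << '\n'; }};
    }
};

int main() {
    std::thread t;
    {
        C c;
        t = c.startThread();
    }             // c is destroyed here
    t.join();     // still OK: the lambda owns its own copy of c
}
```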
- Attribute [[nodiscard]]
- Attribute [[maybe_unused]]
- Attribute [[fallthrough]]
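A short illustration of the three attributes (function and parameter names are made up for the example):

```cpp
#include <iostream>

[[nodiscard]] int computeValue() { return 42; }          // warn if the result is ignored

void handle(int code, [[maybe_unused]] bool verbose) {   // no "unused parameter" warning
    switch (code) {
        case 0:
            std::cout << "zero\n";
            [[fallthrough]];                              // intentional fall-through, no warning
        case 1:
            std::cout << "zero or one\n";
            break;
        default:
            break;
    }
}

int main() {
    computeValue();        // typically triggers a [[nodiscard]] warning
    handle(0, true);
}
```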
namespace A::B::C {}
The recommended way to initialize variables and objects is direct list initialization (brace initialization without =).
auto a{42}; // initializes an int now
auto c = {42}; // still initializes a std::initializer_list<int>
void f1();
void f2() noexcept; // different type
- Using Conditional Exception Specifications
- Consequences for Generic Libraries
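A small sketch of a conditional exception specification; since C++17 noexcept is part of the function type, so assigning f1 to a noexcept function pointer below would not compile (mySwap is an illustrative name):

```cpp
#include <type_traits>
#include <utility>

// Conditional exception specification: mySwap is noexcept only if
// moving T cannot throw.
template <typename T>
void mySwap(T& a, T& b) noexcept(std::is_nothrow_move_constructible_v<T> &&
                                 std::is_nothrow_move_assignable_v<T>) {
    T tmp{std::move(a)};
    a = std::move(b);
    b = std::move(tmp);
}

void f1() {}
void f2() noexcept {}

int main() {
    void (*fp)() noexcept = f2;     // OK
    // void (*fp2)() noexcept = f1; // error since C++17: noexcept is part of the type
    fp();

    int x = 1, y = 2;
    mySwap(x, y);                   // noexcept(true) for int
}
```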
#if __has_include(<filesystem>)
# include <filesystem>
#endif
std::optional<> models a nullable instance of an arbitrary type.
The instance might be a member, an argument, or a return value.
You could also argue that a std::optional<> is a container for zero or one element.
- Optional Return Values
// convert string to int if possible:
std::optional<int> asInt(const std::string& s) {
std::optional<int> ret; // initially no value
try {
ret = std::stoi(s);
}
catch (...) {
}
return ret;
}
- Optional Arguments and Data Members
class Name {
std::string mFirst;
std::optional<std::string> mMid;
std::string mLast;
};
With std::variant<> the C++ standard library provides a new union class
, which among other benefits supports a new way of polymorphism and dealing with inhomogeneous collections.
- Using std::variant<>
std::variant<int, std::string> var{"hi"}; // initialized with string alternative
std::cout << var.index(); // prints 1
var = 42; // now holds int alternative
std::cout << var.index(); // prints 0
- std::monostate
To support variants where the first type has no default constructor, a special helper type is provided: std::monostate.
std::variant<std::monostate, NoDefConstr> v2; // OK
- Visitors
std::variant<int, std::string, double> var(42);
auto printvariant = [](const auto& val) {
std::cout << val;
};
std::visit(printvariant, var);
- Polymorphism and Inhomogeneous Collections with std::variant
// common type of all geometric object types:
using GeoObj = std::variant<Line, Circle, Rectangle>;
// create and initialize a collection of geometric objects:
std::vector<GeoObj> createFigure(){
std::vector<GeoObj> f;
f.push_back(Line{Coord{1,2},Coord{3,4}});
f.push_back(Circle{Coord{5,5},2});
f.push_back(Rectangle{Coord{3,3},Coord{6,4}});
return f;
}
int main() {
std::vector<GeoObj> figure = createFigure();
for (const GeoObj& geoobj : figure) {
std::visit([] (const auto& obj) {
obj.draw(); // polymorphic call of draw()
}, geoobj);
}
}
- In general, I would now recommend programming polymorphism with std::variant<> by default, because it is usually faster (no new and delete, no virtual functions for non-polymorphic use), a lot safer (no pointers), and usually all types are known at compile time of all the code.
- Using std::any
std::any a; // a is empty
std::any b = 4.3; // b has value 4.3 of type double
a = 42; // a has value 42 of type int
b = std::string{"hi"}; // b has value "hi" of type std::string
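Values are retrieved with std::any_cast; a small sketch:

```cpp
#include <any>
#include <iostream>
#include <string>

int main() {
    std::any a = 42;
    std::cout << std::any_cast<int>(a) << '\n';             // 42

    a = std::string{"hi"};
    if (auto* s = std::any_cast<std::string>(&a)) {         // pointer form: returns nullptr on mismatch
        std::cout << *s << '\n';
    }

    try {
        std::cout << std::any_cast<double>(a) << '\n';      // wrong type
    } catch (const std::bad_any_cast& e) {
        std::cout << e.what() << '\n';
    }

    a.reset();                                               // a is empty again
    std::cout << std::boolalpha << a.has_value() << '\n';    // false
}
```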
- Using std::byte
std::byte b1{0x3F};
std::byte b2{0b1111'0000};
std::byte b3[4] {b1, b2, std::byte{1}}; // 4 bytes (last is 0)
The class template basic_string_view describes an object that can refer to a constant contiguous sequence of char-like objects
A string_view doesn't manage the storage that it refers to. Lifetime management is up to the user.
- When would you use a string_view instead of a string?
  - As a parameter of a pure function (instead of const string&)
  - As a return value of a function
  - As a reference to part of a long-lived data structure
- Drawbacks of string_view
  - Lifetime management
  - Not null-terminated
  - Don't use std::string_view at all unless you know what you are doing.
- Don't use a std::string_view to initialize a std::string member
- Don't initialize a string_view as below:
std::string getStr() {
return std::string("long_string_help_to_detect_issues");
}
std::string_view sv1 = getStr(); // RISK: the temporary string is destroyed at the end of the statement, sv1 dangles
string_view sv = "abc"s; // RISK: the temporary std::string is destroyed immediately, sv dangles
...
- std::from_chars: converts a given character sequence to a numeric value
- std::to_chars: converts a numeric value to a given character sequence
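A minimal usage sketch (the buffer size and input string are arbitrary):

```cpp
#include <charconv>
#include <iostream>
#include <string_view>

int main() {
    // string -> number: no locale, no exceptions, no allocation
    std::string_view input{"042 junk"};
    int value = 0;
    auto [ptr, ec] = std::from_chars(input.data(), input.data() + input.size(), value);
    if (ec == std::errc{}) {
        std::cout << "parsed: " << value << ", rest: \"" << ptr << "\"\n";
    }

    // number -> string
    char buf[16];
    auto res = std::to_chars(buf, buf + sizeof(buf), value * 2);
    if (res.ec == std::errc{}) {
        std::cout << std::string_view(buf, res.ptr - buf) << '\n';  // "84"
    }
}
```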
- std::unique_ptr, std::make_unique
- std::shared_ptr, std::make_shared
- std::weak_ptr
- lvalue: corresponds to an object you can refer to, either by name or by following a pointer or lvalue reference.
- rvalue: corresponds to a temporary object returned from a function.
- std::move: unconditionally casts its input into an rvalue reference. It doesn't move anything.
- std::forward: casts its argument to an rvalue only if that argument is bound to an rvalue. It doesn't forward anything.
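A small sketch contrasting std::move and std::forward (the store/relay names are illustrative):

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

void store(const std::string& s) { std::cout << "copy: " << s << '\n'; }
void store(std::string&& s)      { std::cout << "move: " << s << '\n'; }

// Perfect forwarding: the value category of the argument is preserved.
template <typename T>
void relay(T&& arg) {
    store(std::forward<T>(arg));   // lvalue stays lvalue, rvalue stays rvalue
}

int main() {
    std::string name = "modern C++";

    relay(name);                   // calls store(const std::string&)
    relay(std::string{"tmp"});     // calls store(std::string&&)

    std::vector<std::string> v;
    v.push_back(std::move(name));  // std::move only casts; push_back does the actual move
    std::cout << "name after move: \"" << name << "\"\n"; // valid but unspecified state
}
```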
The Nightmare of Move Semantics for Trivial Classes
Code example
- Capture local variables only; no member variables, no static variables.
- Avoid default capture modes
  There are two default capture modes in C++11: by-reference [&] and by-value [=].
  Default by-reference capture can lead to dangling references.
  Default by-value capture is susceptible to dangling pointers (especially this), and it misleadingly suggests that lambdas are self-contained.
- C++14 supports capture by moving an object into the closure (in C++11 you can use std::bind, but it is a little complicated):
std::vector<double> data;
auto func = [data = std::move(data)]{
// do something
};
std::thread
std::async
std::future
std::promise
std::atomic
std::mutex
std::condition_variable
std::tuple
std::array
std::forward_list
std::unordered_set
std::unordered_map
std::unordered_multiset
std::unordered_multimap
New Keyword | Explain |
---|---|
delete | prevents users from calling a particular function |
default | requests the compiler-generated version of a special member function |
override | ensures a virtual function really overrides a base-class virtual function |
final | prevents further overriding of a virtual function or further derivation from a class |
noexcept | declares that a function does not throw exceptions |
auto | deduces the type of a variable from its initializer |
constexpr | allows evaluation at compile time |
nullptr | null pointer literal of type std::nullptr_t |
thread_local | auto lb = [](int n) { static thread_local int v = 0; v += n;}; |
using alias | type alias; the template-friendly replacement for typedef |
decltype | yields the declared type of its operand |
enum class | scoped, strongly typed enumeration |
static_cast | conversion between similar types such as pointer types or numeric types |
const_cast | adds or removes const or volatile |
reinterpret_cast | converts between pointers or between integral types and pointers |
dynamic_cast | converts between polymorphic pointers or references in the same class hierarchy |
VII. Moving to Modern C++ by Scott Meyers
- Braced {} initialization is the most widely usable initialization syntax, it prevents narrowing conversions, and it's immune to C++'s most vexing parse.
- During constructor overload resolution, braced initializers are matched to std::initializer_list parameters if at all possible, even if other constructors offer seemingly better matches.
- An example of where the choice between parentheses and braces can make a significant difference is creating a std::vector<numeric type> with two arguments (see the sketch below).
- Choosing between parentheses and braces for object creation inside templates can be challenging.
- Default constructor
- Destructor
- Copy constructor
- Copy assignment operator
- Move constructor and move assignment operator
Refer to C++ Smart Pointers - Usage and Secrets - Nicolai Josuttis
- Use std::unique_ptr for exclusive-ownership resource management
- Downcasts do not work for unique pointers
class GeoObj {};
class Circle : public GeoObj {};
std::vector< std::unique_ptr<GeoObj> > geoObjs;
geoObjs.emplace_back( std::make_unique<Circle>(...) ); // Ok, insert circle into collection
const auto& p = geoObjs[0]; // p is unique_ptr<GeoObj>;
std::unique_ptr<Circle> cp{p}; // Compile-time Error
auto cp(dynamic_cast<std::unique_ptr<Circle>>(p)); // Compile-time Error
if (auto cp = dynamic_cast<Circle*>(p.get())) // OK, use inside `if` to restrict the lifetime of cp
{
// use cp as Circle*
}
- Pass std::unique_ptr to function
void sink(std::unique_ptr<GeoObj> const& up){} // OK: reference to the unique pointer, no ownership transfer
void sink(std::unique_ptr<GeoObj> up){} // OK: pass by value, caller must move (Herb Sutter style)
void sink(std::unique_ptr<GeoObj>&& up){} // OK: pass by rvalue reference, caller must move (Scott Meyers style)
...
auto up(std::make_unique<GeoObj>());
sink(std::move(up));
up.release(); // not needed: after the move into sink(), up is already empty
- Custom Deleter
auto deleter = [](std::FILE* fp) {
std::fclose(fp);
fp = nullptr;
};
std::FILE* fp = nullptr;
fopen_s(&fp, "test.txt", "w+");
std::unique_ptr<std::FILE, decltype(deleter)> up(fp, deleter); // deleter as type and argument
//std::unique_ptr<std::FILE, decltype(deleter)> up(fp); // OK since C++20: a stateless lambda is default-constructible
const char str[] = "hello world!\n";
fwrite(str, sizeof(char), sizeof(str), up.get());
- std::shared_ptrs are twice the size of a raw pointer
- Memory for the reference count must be dynamically allocated
- Increments and decrements of the reference count must be atomic
auto deleter = [](Widget* pw) {
delete pw;
pw = nullptr;
};
std::unique_ptr<Widget, decltype(deleter)> upw(new Widget, deleter); // deleter as type and argument
std::shared_ptr<Widget> spw(new Widget, deleter); // deleter is only an argument, not part of the type
- std::static_pointer_cast
- std::dynamic_pointer_cast
- std::const_pointer_cast
- std::reinterpret_pointer_cast
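As a sketch, std::dynamic_pointer_cast makes the downcast that failed for std::unique_ptr above work for std::shared_ptr (reusing the GeoObj/Circle names; a virtual destructor is assumed):

```cpp
#include <iostream>
#include <memory>

class GeoObj {
public:
    virtual ~GeoObj() = default;   // polymorphic base needed for dynamic_pointer_cast
};
class Circle : public GeoObj {
public:
    void draw() const { std::cout << "Circle\n"; }
};

int main() {
    std::shared_ptr<GeoObj> p = std::make_shared<Circle>();

    // Unlike unique_ptr, a shared_ptr can be downcast: the result shares
    // ownership (and the control block) with p.
    if (auto cp = std::dynamic_pointer_cast<Circle>(p)) {
        cp->draw();
        std::cout << p.use_count() << '\n';   // 2: p and cp share the same object
    }
}
```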
- First example:
int* p = new int;
std::shared_ptr<int> sp1(p);
std::shared_ptr<int> sp2(p);
(*) Note: creating a std::shared_ptr from a raw pointer is a bad idea: sp1 and sp2 each create their own control block, so the int is deleted twice.
- Second example:
Suppose our program uses std::shared_ptrs to manage Widget objects, and we have a data structure that keeps track of Widgets that have been processed.
class Widget {
public:
void process(std::vector< std::shared_ptr<Widget> > &widgets) {
widgets.emplace_back(this); // Create new control-block and point to the same Widget object.
}
};
int main() {
std::vector< std::shared_ptr<Widget> > processedWidgets;
auto spw = std::make_shared<Widget>(); // Create a control-block and point to new Widget object
spw->process(processedWidgets);
}
- The problem is that there are two control blocks pointing to the same Widget object. Hence, the Widget object is destroyed twice, and the application crashes.
Solution by Scott Meyers
- Use std::enable_shared_from_this
- This is the Curiously Recurring Template Pattern (CRTP)
class Widget : public std::enable_shared_from_this<Widget> {
public:
void process(std::vector< std::shared_ptr<Widget> > &widgets) {
widgets.emplace_back(shared_from_this()); // Look-up control-block
}
};
int main() {
std::vector< std::shared_ptr<Widget> > processedWidgets;
auto spw = std::make_shared<Widget>();
spw->process(processedWidgets);
}
OK, so far so good: shared_from_this looks up the control block for the current object and creates a new std::shared_ptr that refers to that control block.
But if the control block does not exist yet, shared_from_this throws an exception. For example, this happens if you change auto spw = std::make_shared<Widget>(); to Widget* spw = new Widget();.
Therefore, we must make sure a std::shared_ptr already exists before calling process().
Apply the factory function pattern:
class Widget : public std::enable_shared_from_this<Widget> {
private:
Widget() = default; // private constructor: Widget can only be created via the factory function
public:
template<typename... Args>
static std::shared_ptr<Widget> create(Args&&... params) {
return std::shared_ptr<Widget>(new Widget(std::forward<Args>(params)...));
}
void process(std::vector< std::shared_ptr<Widget> > &widgets) {
widgets.emplace_back(shared_from_this()); // Look-up control-block
}
};
int main() {
std::vector< std::shared_ptr<Widget> > processedWidgets;
auto spw = Widget::create();
spw->process(processedWidgets);
}
OK, so far so good: the issue is sorted out.
However, there is overhead if you pass the std::shared_ptr by value too often (as a function argument), because every copy updates the atomic reference count.
Prefer passing the std::shared_ptr by reference when the function does not take ownership.
Refer to Rainer Grimm
A std::shared_ptr
consists of a control block and its resource:
- The control block is thread-safe: That means, modifying the reference counter is an atomic operation and you have the guarantee that the resource will be deleted exactly once.
- The access to the resource is not thread-safe.
- Atomic smart pointers are supported in C++20
- This is the cyclic-reference issue of std::shared_ptr
In this case, we cannot use a raw pointer because if A is destroyed, B will contain a pointer to A that dangles.
B won't be able to detect that, so B may inadvertently dereference the dangling pointer.
Thanks to std::weak_ptr:
- Allows sharing but not owning an object or resource
- A "pointer that knows when it dangles" (Scott Meyers)
- Resolves the cyclic-reference issue of std::shared_ptr and raw pointers
- Potential use cases for std::weak_ptr include caching, observer lists.
- Using a std::weak_ptr
1. No pointer interface
2. Access to the resource through a temporary shared pointer
std::weak_ptr<R> wp;
if (auto p = wp.lock()) {
    p->callMember();
}
- But be aware that a std::weak_ptr might keep memory alive if used with a std::shared_ptr created by std::make_shared<R>: the object and its control block share one allocation, so the object's storage cannot be freed until the last std::weak_ptr is destroyed.
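A minimal sketch of the cyclic-reference issue mentioned above and how std::weak_ptr breaks the cycle (the A/B names are illustrative):

```cpp
#include <iostream>
#include <memory>

struct B;   // forward declaration

struct A {
    std::shared_ptr<B> b;
    ~A() { std::cout << "A destroyed\n"; }
};

struct B {
    // std::shared_ptr<A> a;  // would create a cycle: neither A nor B would ever be destroyed
    std::weak_ptr<A> a;       // shares but does not own -> the cycle is broken
    ~B() { std::cout << "B destroyed\n"; }
};

int main() {
    auto a = std::make_shared<A>();
    auto b = std::make_shared<B>();
    a->b = b;
    b->a = a;

    // access A through the weak_ptr only via a temporary shared_ptr
    if (auto locked = b->a.lock()) {
        std::cout << "A is still alive\n";
    }
}   // both destructors run: the weak_ptr does not keep A alive
```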
Refer to Fedor Pikus's talk
- std::atomic is neither copyable nor movable (atomic variables are not CopyConstructible).
- The primary std::atomic template may be instantiated with any TriviallyCopyable type T satisfying both CopyConstructible and CopyAssignable.
What is trivially copyable?
1. Continuous chunk of memory
2. Copying the object means copying all bits (memcpy)
3. No virtual function, noexcept constructor
- On MSVC: if _ENABLE_ATOMIC_ALIGNMENT_FIX is not defined, the compiler will complain about std::atomic<T> with sizeof(T) equal to 2/4/8 and alignof(T) < sizeof(T).
struct Data { // user-defined trivially-copyable type
int x; // 4 bytes
void* ptr; // 4 bytes on a 32-bit build
Data() noexcept : x(0), ptr(nullptr)
{}
};
std::atomic<Data> atm;
...
struct A { int a[100]; };
std::atomic<A> a;
assert(std::atomic_is_lock_free(&a)); // false: a is not lock-free
(*) Note: std::atomic<float> and std::atomic<double> are not fully supported (no floating-point specializations with fetch_add/fetch_sub) until C++20.
Before C++11, there was only one contract. The C++ language specification did not include multithreading or atomics. There was no memory model.
With C++11 everything has changed. C++11 is the first standard aware of multiple threads. The reason for the well-defined behaviour of threads is the C++ memory model that was heavily inspired by the Java memory model
enum memory_order{
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
};
- Read operations: memory_order_acquire and memory_order_consume
- Write operations: memory_order_release
- Read-modify-write operations: memory_order_acq_rel and memory_order_seq_cst
- Relaxed operation: memory_order_relaxed; there are no synchronization or ordering constraints imposed on other reads or writes, only this operation's atomicity is guaranteed.
Sequential consistency provides two guarantees:
- The instructions of a program are executed in source-code order.
- There is a global order of all operations on all threads.
With weaker memory orders these guarantees no longer hold:
- The counter-intuitive behaviour is that thread 1 can see the operations of thread 2 in a different order, so there is no view of a global clock. For example, from the perspective of thread 1, it is possible that the operation res2 = x.load() overtakes y.store(1).
- It is even possible that thread 1 or thread 2 do not perform their operations in the order defined in the source code. For example, thread 2 can first execute res2 = x.load() and then y.store(1).
This is very difficult to understand; see cppreference and further discussion.
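A classic release/acquire sketch (the variable names are illustrative): the release store synchronises with the acquire load, so the consumer is guaranteed to see the write to data.

```cpp
#include <atomic>
#include <cassert>
#include <string>
#include <thread>

std::string data;
std::atomic<bool> ready{false};

void producer() {
    data = "hello";                                  // ordinary write
    ready.store(true, std::memory_order_release);    // publish: nothing above may move below
}

void consumer() {
    while (!ready.load(std::memory_order_acquire))   // nothing below may move above
        ;                                            // spin until published
    assert(data == "hello");                         // guaranteed to see the write to data
}

int main() {
    std::thread t1(producer);
    std::thread t2(consumer);
    t1.join();
    t2.join();
}
```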
- Mutexes
  - std::mutex
  - std::recursive_mutex: allows the same thread to lock the mutex many times.
  - std::timed_mutex
  - std::recursive_timed_mutex
  - std::shared_mutex: usually used when multiple readers can access the same resource at the same time without causing data races, but only one writer can do so.
  - std::shared_timed_mutex
- Locks
  - std::lock_guard
  - std::unique_lock
  - std::scoped_lock: locks many mutexes at once
  - std::shared_lock: many threads can read but only one thread can write
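A small reader/writer sketch with std::shared_mutex (the table/readValue/writeValue names are illustrative):

```cpp
#include <iostream>
#include <map>
#include <shared_mutex>
#include <string>
#include <thread>

std::map<std::string, int> table;
std::shared_mutex tableMutex;

int readValue(const std::string& key) {
    std::shared_lock lock(tableMutex);     // many readers may hold this at the same time
    auto it = table.find(key);
    return it != table.end() ? it->second : 0;
}

void writeValue(const std::string& key, int value) {
    std::unique_lock lock(tableMutex);     // exclusive: blocks readers and other writers
    table[key] = value;
}

int main() {
    std::thread writer(writeValue, "answer", 42);
    writer.join();
    std::thread reader([] { std::cout << readValue("answer") << '\n'; });
    reader.join();
}
```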
Thread-local data, also known as thread-local storage, is created for each thread separately.
- std::condition_variable: can only wait on an object of type std::unique_lock<std::mutex>
- std::condition_variable_any: can wait on a user-supplied lock type that meets the BasicLockable concept
Condition variables enable threads to be synchronised via messages.
- The Predicate
- Lost Wakeup
- Spurious Wakeup
- The Wait Workflow
std::unique_lock<std::mutex> lck(mutex_);
condVar.wait(lck, []{ return dataReady; });
Equivalent to:
std::unique_lock<std::mutex> lck(mutex_);
while ( ![]{ return dataReady; }() ) {
condVar.wait(lck);
}
Even if the shared variable is atomic, it must be modified under the mutex to publish the modification to the waiting thread correctly.
Use a mutex to protect the shared variable
Even if you make dataReady an atomic, it must be modified under the mutex;
if not the modification to the waiting thread may be published, but not correctly synchronised.
This race condition may cause a deadlock.
What does that mean: published, but not correctly synchronised.
Let's have a closer look once more at the wait workflow and assume that dataReady is an atomic that is modified without being protected by the mutex mutex_.
std::unique_lock<std::mutex> lck(mutex_);
while ( ![]{ return dataReady.load(); }() ) {
// time window
condVar.wait(lck);
}
Let me assume the notification is sent while the condition variable condVar is not in the waiting state.
This means execution of the thread is in the source snippet between line 2 and 4 (see the comment "time window").
The result is that the notification is lost.
Afterwards the thread goes back into the waiting state and presumably sleeps forever.
This wouldn't have happened if dataReady had been protected by a mutex: because of the synchronisation with the mutex, the notifying thread could only modify dataReady and send the notification while the waiting thread does not hold the lock, i.e. before the predicate check or after wait() has released the mutex, so the notification cannot fall into the time window.
- Tasks versus Threads
- std::async
- std::packaged_task
- std::promise and std::future
  - If the promise sets the value or the exception more than once, a std::future_error exception is thrown.
  - If you destroy the std::promise without calling a set-method, or a std::packaged_task before invoking it, a std::future_error exception with the error code std::future_errc::broken_promise is stored in the shared state.
  - If a future fut asks for the result more than once, a std::future_error exception is thrown.
  - There is a one-to-one relationship between the promise and the future.
- std::shared_future
  - One-to-many relationship between a promise and many futures.
- Exceptions
  - If the callable used by std::async or by std::packaged_task throws, the exception is stored in the shared state. When the future fut then calls fut.get(), the exception is rethrown, and the future has to handle it.
- Notifications
  - Condition variables can synchronise threads multiple times; a promise can send its notification only once.
  - For a one-shot notification, promise and future are the first choice.
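A minimal std::async sketch (the summation task is arbitrary); fut.get() would also rethrow any exception stored in the shared state:

```cpp
#include <future>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    std::vector<int> v(1000);
    std::iota(v.begin(), v.end(), 1);

    // std::async returns a std::future; the callable may run on another thread.
    auto fut = std::async(std::launch::async, [&v] {
        return std::accumulate(v.begin(), v.end(), 0LL);
    });

    // get() blocks until the result is ready (or rethrows a stored exception).
    std::cout << "sum = " << fut.get() << '\n';
}
```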
void waitForWork(std::future<void> && fut)
{
std::cout << "Worker: Waiting for work." << std::endl;
fut.wait();
std::cout << "work done\n";
}
void setReady(std::promise<void> &&pro)
{
std::cout << "Send data is ready.\n";
pro.set_value();
}
void test()
{
using namespace std::chrono_literals;
std::promise<void> pro;
std::future<void> fut = pro.get_future();
std::thread t1(waitForWork, std::move(fut));
std::this_thread::sleep_for(2s);
std::thread t2(setReady, std::move(pro));
t1.join();
t2.join();
}
ABA means you read a value twice and each time it returns the same value A.
Therefore you conclude that nothing changed in between.
However, you missed the fact that the value was updated to B somewhere in between.
It is a victim of a spurious wakeup or lost wakeup.
...
...
The thread is waiting for a notification that never fires or has already fired.
There are two main reasons for deadlocks:
- A mutex has not been unlocked.
- You lock your mutexes in a different order.
void deadlock(std::mutex& a, std::mutex& b) {
std::lock_guard<std::mutex> g1(a);
std::lock_guard<std::mutex> g2(b);
// do something here.
}
int main() {
std::mutex m1, m2;
std::thread t1(deadlock, std::ref(m1), std::ref(m2));
std::thread t2(deadlock, std::ref(m2), std::ref(m1));
t1.join(); // likely hangs here: t1 and t2 deadlock by locking m1 and m2 in opposite order
t2.join();
return 0;
}
- Keep in mind: only lock when needed, and unlock as soon as possible
- Avoid naked mutexes
- Avoid nested locks: don't acquire a lock if you already hold one.
- Avoid calling user-supplied code while holding a lock: because the code is user-supplied, you have no idea what it could do; it could do anything, including acquiring another lock.
- Acquire locks in a fixed order, e.g. using std::lock
- Use a lock hierarchy
Use std::unique_lock
void fixDeadlock(std::mutex& a, std::mutex& b) {
std::unique_lock<std::mutex> g1(a, std::defer_lock);
std::unique_lock<std::mutex> g2(b, std::defer_lock);
std::lock(g1,g2);
// do something here.
}
or use std::lock_guard
void fixDeadlock(std::mutex& a, std::mutex& b) {
std::lock(a, b);
std::lock_guard<std::mutex> g1(a, std::adopt_lock); // to make sure a will be released
std::lock_guard<std::mutex> g2(b, std::adopt_lock); // to make sure b will be released
// do something here.
}
or use std::scoped_lock
void fixDeadlock(std::mutex& a, std::mutex& b) {
std::scoped_lock scoLock(a, b);
// do something here.
}
False sharing occurs when two threads access at the same time different variables a and b that are located on the same cache line, and at least one of the accesses is a write.
- std::hardware_destructive_interference_size: returns the minimum offset between two objects to avoid false sharing.
- std::hardware_constructive_interference_size: returns the maximum size of contiguous memory to promote true sharing.
struct Sum{
alignas(std::hardware_destructive_interference_size) long long a{0};
alignas(std::hardware_destructive_interference_size) long long b{0};
};
...
std::thread t([]{std::cout << std::this_thread::get_id();});
std::thread t2([]{std::cout << std::this_thread::get_id();});
t = std::move(t2); // Issue: t is still joinable; it must be joined (or detached) before being assigned to, otherwise std::terminate is called
t.join();
t2.join(); // Issue: after the move, t2 no longer owns a thread, so join() throws std::system_error
....