
Commit ba91950

add some
1 parent a9a8b60 commit ba91950

File tree: 2 files changed, +260 -0 lines

Code/ObjectPool.md

# ObjectPool

## Implementing a Super Object Pool

Object pools make sense for objects that are expensive to create. To avoid repeatedly constructing such objects, we can use an object pool: create a batch of objects up front and keep them in a container; whenever the program needs a new object, it takes one from the pool, and whenever it is done with one, it hands it back to the pool. This avoids repeated object creation and improves performance. First, a simple implementation:
```c++
#include <cstddef>
#include <list>

template<typename Object>
class ObjectPool
{
public:
    // Pre-create unSize objects and store them in the pool.
    ObjectPool(size_t unSize) :
        m_unSize(unSize)
    {
        for (size_t unIdx = 0; unIdx < m_unSize; ++unIdx)
        {
            m_oPool.push_back(new Object());
        }
    }

    ~ObjectPool()
    {
        typename std::list<Object *>::iterator oIt = m_oPool.begin();
        while (oIt != m_oPool.end())
        {
            delete (*oIt);
            ++oIt;
        }
        m_unSize = 0;
    }

    // Take an object from the pool; fall back to new if the pool is empty.
    Object * GetObject()
    {
        Object * pObj = NULL;
        if (0 == m_unSize)
        {
            pObj = new Object();
        }
        else
        {
            pObj = m_oPool.front();
            m_oPool.pop_front();
            --m_unSize;
        }

        return pObj;
    }

    // The caller must hand the object back by hand when done with it.
    void ReturnObject(Object * pObj)
    {
        m_oPool.push_back(pObj);
        ++m_unSize;
    }

private:
    size_t m_unSize;
    std::list<Object *> m_oPool;
};
```
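
For illustration, here is a minimal usage sketch (my own addition, not part of the original text; `Texture` is a hypothetical expensive-to-construct type). Note the manual return step:

```c++
struct Texture { /* expensive to construct */ };

int main()
{
    ObjectPool<Texture> pool(8);     // pre-create 8 objects

    Texture * pTex = pool.GetObject();
    // ... use pTex ...
    pool.ReturnObject(pTex);         // forgetting this call loses the object
    return 0;
}
```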

This object pool implementation is typical: a fixed number of objects is created up front, objects are taken straight from the pool, and they are returned after use. Most object pools follow this pattern. It works, but it has the following shortcomings:

1. ObjectPool<T> can only hold objects of one specific type, not objects of arbitrary types, nor can it handle overloaded constructors or constructors taking different parameters.
2. Objects must be returned by hand, which is inconvenient; worse, there is the risk of forgetting to return them.

I would like a more powerful object pool: one that can hold objects of any type and reclaims used objects automatically, with no manual return step. Building such a pool means solving the two problems above, and both can be solved with C++11.

For problem 1, holding objects of any type: essentially, the object's type has to be erased inside the pool, and an Any type does exactly that.
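
The pool below includes "Any.hpp", which is not part of this commit. As a rough sketch, the code only relies on an interface along these lines: `Is<T>()` to test the stored type and `AnyCast<T>()` to get it back (in C++17, `std::any` with `std::any_cast` could play a similar role):

```c++
#include <memory>
#include <typeindex>

// Rough sketch of the Any interface used below; not the real Any.hpp.
struct Any
{
    Any() : m_tpIndex(std::type_index(typeid(void))) {}

    template<typename T>
    Any(const T& value)
        : m_ptr(new Derived<T>(value)),
          m_tpIndex(std::type_index(typeid(T))) {}

    template<typename T>
    bool Is() const
    {
        return m_tpIndex == std::type_index(typeid(T));
    }

    template<typename T>
    T& AnyCast()
    {
        // The caller is expected to check Is<T>() first; no error handling here.
        return static_cast<Derived<T>*>(m_ptr.get())->m_value;
    }

private:
    struct Base
    {
        virtual ~Base() {}
    };

    template<typename T>
    struct Derived : Base
    {
        Derived(const T& value) : m_value(value) {}
        T m_value;
    };

    std::shared_ptr<Base> m_ptr;
    std::type_index m_tpIndex;
};
```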

For problem 2, reclaiming used objects automatically: a smart pointer solves this. When creating a shared_ptr we can supply a custom deleter that, instead of destroying the object, returns it to the pool. The whole process is invisible to the caller and handled by the smart pointer itself.
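
In isolation, the deleter trick looks like this (a minimal sketch; `Widget` and `g_pool` are hypothetical stand-ins for a pooled type and the pool's storage):

```c++
#include <memory>
#include <vector>

struct Widget {};              // hypothetical pooled type

std::vector<Widget*> g_pool;   // stands in for the pool's storage

std::shared_ptr<Widget> Acquire()
{
    // The custom deleter recycles the object instead of destroying it,
    // so returning to the pool is automatic and invisible to the caller.
    return std::shared_ptr<Widget>(new Widget(), [](Widget* w)
    {
        g_pool.push_back(w);   // returned to the pool, not deleted
    });
}
```

Combining the type erasure of Any with this recycling deleter gives the full pool: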
```c++
#include <string>
#include <functional>
#include <memory>
#include <map>
#include <stdexcept>

#include "Any.hpp"

const int MaxObjectNum = 10;

class ObjectPool
{
    template<typename T, typename... Args>
    using Constructor = std::function<std::shared_ptr<T>(Args...)>;
public:

    ObjectPool() : needClear(false)
    {
    }

    ~ObjectPool()
    {
        // From now on the deleters destroy objects instead of recycling them.
        needClear = true;
    }

    // Register a type and how many instances to pre-create by default.
    template<typename T, typename... Args>
    void Create(int num)
    {
        if (num <= 0 || num > MaxObjectNum)
            throw std::logic_error("object num error");

        auto constructName = typeid(Constructor<T, Args...>).name();

        Constructor<T, Args...> f = [constructName, this](Args... args)
        {
            return createPtr<T>(std::string(constructName), args...);
        };

        m_map.emplace(typeid(T).name(), f);
        m_counter.emplace(constructName, num);
    }

    // Wrap a new object in a shared_ptr whose deleter recycles it into the pool.
    template<typename T, typename... Args>
    std::shared_ptr<T> createPtr(const std::string& constructName, Args... args)
    {
        return std::shared_ptr<T>(new T(args...), [constructName, this](T* t)
        {
            if (needClear)
                delete t;
            else
                m_object_map.emplace(constructName, std::shared_ptr<T>(t));
        });
    }

    // Fetch an object: reuse a pooled instance if possible, otherwise create one.
    template<typename T, typename... Args>
    std::shared_ptr<T> Get(Args... args)
    {
        using ConstructType = Constructor<T, Args...>;

        std::string constructName = typeid(ConstructType).name();
        auto range = m_map.equal_range(typeid(T).name());

        for (auto it = range.first; it != range.second; ++it)
        {
            if (it->second.Is<ConstructType>())
            {
                auto ptr = GetInstance<T>(constructName, args...);

                if (ptr != nullptr)
                    return ptr;

                return CreateInstance<T, Args...>(it->second, constructName, args...);
            }
        }

        return nullptr;
    }

private:
    template<typename T, typename... Args>
    std::shared_ptr<T> CreateInstance(Any& any,
        std::string& constructName, Args... args)
    {
        using ConstructType = Constructor<T, Args...>;
        ConstructType f = any.AnyCast<ConstructType>();

        // Pre-fill the pool on first use, then hand one instance to the caller.
        InitPool(f, constructName, args...);
        return f(args...);
    }

    // Create num - 1 spare instances so that later Get() calls hit the pool.
    template<typename T, typename... Args>
    void InitPool(T& f, std::string& constructName, Args... args)
    {
        int num = m_counter[constructName];

        if (num != 0)
        {
            for (int i = 0; i < num - 1; i++)
            {
                m_object_map.emplace(constructName, f(args...));
            }
            m_counter[constructName] = 0;
        }
    }

    // Try to take a recycled instance out of the pool.
    template<typename T, typename... Args>
    std::shared_ptr<T> GetInstance(std::string& constructName, Args... args)
    {
        auto it = m_object_map.find(constructName);
        if (it == m_object_map.end())
            return nullptr;

        auto ptr = it->second.AnyCast<std::shared_ptr<T>>();
        // Re-initialize the recycled object when constructor arguments are given.
        if (sizeof...(Args) > 0)
            *ptr.get() = std::move(T(args...));

        m_object_map.erase(it);
        return ptr;
    }

private:
    std::multimap<std::string, Any> m_map;         // type name -> constructors
    std::multimap<std::string, Any> m_object_map;  // constructor name -> pooled objects
    std::map<std::string, int> m_counter;          // default pre-create counts
    bool needClear;
};
```
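
A usage sketch of the pool above (my own illustration; `BigObject` is a hypothetical type with several constructors):

```c++
// BigObject is a hypothetical type with several constructors.
struct BigObject
{
    BigObject() {}
    BigObject(int a) {}
};

int main()
{
    ObjectPool pool;
    pool.Create<BigObject>(2);        // register the default constructor
    pool.Create<BigObject, int>(2);   // register the one-argument constructor

    {
        auto p1 = pool.Get<BigObject>();        // created on demand, one spare pooled
        auto p2 = pool.Get<BigObject, int>(1);
    }   // p1 and p2 go out of scope here and are recycled, not destroyed

    auto p3 = pool.Get<BigObject>();  // served from the recycled instances
    return 0;
}
```

Because recycling happens inside the shared_ptr deleter, objects return to the pool automatically when the last reference goes away; no manual ReturnObject call is needed.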

ML/深度学习框架自动求导.md

# Automatic Differentiation in Deep Learning Frameworks

A recommendation for a very handy automatic differentiation tool: [autograd](https://github.com/HIPS/autograd).

Just run `pip install autograd`.

One drawback to watch out for: `grad` differentiates a function with respect to its first argument (by default), so everything you want gradients for has to be passed in as that first argument.
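
As a minimal illustration of that caveat (my own sketch, not from the original post):

```python
from autograd import grad

def f(x, y):
    return x * y + y

df_dx = grad(f)         # differentiates with respect to the first argument, x
print(df_dx(3.0, 2.0))  # prints 2.0, i.e. the value of y
```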

For example, [A Step by Step Backpropagation Example](https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/) can be written as:
```python
import autograd.numpy as np
from autograd import grad

def sigmoid(x):
    return 1.0 / (np.exp(-x) + 1)

def predict(params, input):
    (W1, B1), (W2, B2) = params
    hidden = sigmoid(np.dot(W1, input) + B1)
    output = sigmoid(np.dot(W2, hidden) + B2)
    return output

def loss(params, input, target):
    return (0.5 * (target - predict(params, input)) ** 2).sum()

# loss takes params as its first argument, so grad differentiates w.r.t. params.
loss_grad = grad(loss)

def train(params, input, target):
    grads = loss_grad(params, input, target)
    # One step of gradient descent with learning rate 0.5.
    return [(W - 0.5 * dW, B - 0.5 * dB)
            for ((dW, dB), (W, B)) in zip(grads, params)]

W1 = np.array(
    [[0.15, 0.20],
     [0.25, 0.30]])

B1 = 0.35

W2 = np.array(
    [[0.40, 0.45],
     [0.50, 0.55]])

B2 = 0.60

PARAMS = [(W1, B1), (W2, B2)]
INPUT = np.array([0.05, 0.10])
TARGET = np.array([0.01, 0.99])

for step in range(10000):
    PARAMS = train(PARAMS, INPUT, TARGET)
    if step % 1000 == 0:
        print(loss(PARAMS, INPUT, TARGET))
```

This is a lot easier to follow than a pile of math formulas.
