new file mode 100644
@@ -0,0 +1,148 @@
+#include "AtomAllocator.h"
+
+////////////////////////////////// Atom ////////////////////////////////////////
+
+Atom::Atom() : pos(0), mass(0.f) {}
+
+Atom::Atom(uint64_t p, float m) : pos(p), mass(m) {}
+
+void Atom::operator=(Atom other)
+{
+    pos = other.pos;
+    mass = other.mass;
+}
+
+Archive& operator<<(Archive &ar, Atom &a)
+{
+    ar << a.pos << a.mass;
+    return ar;
+}
+
+Archive& operator>>(Archive &ar, Atom &a)
+{
+    ar >> a.pos >> a.mass;
+    return ar;
+}
+
+////////////////////////////// IndexFlagSet ////////////////////////////////////
+
+// all bits start as free
+IndexFlagSet::IndexFlagSet()
+{
+    for (unsigned i = 0; i < NUM_INDEX_CHUNKS; ++i)
+    {
+        parts[i] = -1;
+    }
+}
+
+// find first set bit
+// TODO this is compiler dependent
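+// (__builtin_ffsll is a GCC/Clang builtin that returns one plus the index of
+// the least significant set bit, or 0 if none - hence the "+ 64 * i - 1" below)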
+unsigned IndexFlagSet::getFirstFree() const
+{
+    GAPS_ASSERT(isAnyFree());
+
+    for (unsigned i = 0; i < NUM_INDEX_CHUNKS; ++i)
+    {
+        if (parts[i] != 0)
+        {
+            return __builtin_ffsll(parts[i]) + 64 * i - 1;
+        }
+    }
+    return 0; // ERROR IF REACHED
+}
+
+// manually check this is consistent with NUM_INDEX_CHUNKS
+bool IndexFlagSet::isAnyFree() const
+{
+    return (parts[0] | parts[1] | parts[2] | parts[3] | parts[4] | parts[5]
+        | parts[6] | parts[7]) != 0;
+}
+
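+// (a 1 bit marks a free index, a 0 bit marks one in use; set() assumes bit n
+// is currently 1, so the XOR below clears it)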
+// set position n to 0
+void IndexFlagSet::set(uint64_t n)
+{
+    unsigned i = n / 64;
+    parts[i] ^= (1ull << (n - 64 * i));
+}
+
+// set position n to 1
+void IndexFlagSet::release(uint64_t n)
+{
+    unsigned i = n / 64;
+    parts[i] |= (1ull << (n - 64 * i));
+}
+
+//////////////////////////////// AtomPool //////////////////////////////////////
+
+AtomPool::AtomPool()
+{
+    mPool = new Atom[POOL_SIZE];
+}
+
+AtomPool::~AtomPool()
+{
+    delete[] mPool;
+}
+
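+// handing out an atom is just a bitset scan - no heap allocation happens here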
+Atom* AtomPool::alloc()
+{
+    unsigned n = mIndexFlags.getFirstFree();
+    mIndexFlags.set(n);
+    Atom *a = &(mPool[n]);
+    a->poolIndex = n;
+    return a;
+}
+
+void AtomPool::free(Atom* a)
+{
+    mIndexFlags.release(a->poolIndex);
+}
+
+bool AtomPool::depleted() const
+{
+    return !mIndexFlags.isAnyFree();
+}
+
+////////////////////////////// AtomAllocator ///////////////////////////////////
+
+AtomAllocator::AtomAllocator()
+{
+    mIndex = 0;
+    mPools.push_back(new AtomPool());
+}
+
+AtomAllocator::~AtomAllocator()
+{
+    std::vector<AtomPool*>::iterator it = mPools.begin();
+    for (; it != mPools.end(); ++it)
+    {
+        delete *it;
+    }
+}
+
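+// NOTE: mIndex points at the pool that last satisfied an allocation; when the
+// last pool fills up, a fresh pool is appended and the scan restarts at 0 so
+// that earlier pools with freed slots are reused first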
+Atom* AtomAllocator::createAtom(uint64_t pos, float mass)
+{
+    GAPS_ASSERT(mPools.size() < 65536);
+
+    if (mPools[mIndex]->depleted() && mIndex == mPools.size() - 1)
+    {
+        mPools.push_back(new AtomPool());
+        mIndex = 0; // loop back around all pools before getting to new one
+    }
+
+    while (mPools[mIndex]->depleted())
+    {
+        ++mIndex;
+    }
+
+    Atom *a = mPools[mIndex]->alloc();
+    a->allocatorIndex = mIndex;
+    a->pos = pos;
+    a->mass = mass;
+    return a;
+}
+
+void AtomAllocator::destroyAtom(Atom *a)
+{
+    mPools[a->allocatorIndex]->free(a);
+}

new file mode 100644
@@ -0,0 +1,98 @@
+#ifndef __COGAPS_ATOM_ALLOCATOR_H__
+#define __COGAPS_ATOM_ALLOCATOR_H__
+
+#include "utils/Archive.h"
+
+#include <stdint.h>
+#include <vector>
+
+// NOTE: can only have 65536 pools with a 16bit index, so POOL_SIZE * 65536
+// is the maximum number of atoms CoGAPS can have - don't change this unless
+// you know what you are doing
+#define POOL_SIZE 512 // allows for 33.5 million atoms
+#define NUM_INDEX_CHUNKS (POOL_SIZE / 64)
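+// (512 * 65536 = 33,554,432, hence the 33.5 million figure above; POOL_SIZE
+// must stay a multiple of 64 so the chunk count divides evenly)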
+
+class AtomAllocator;
+class AtomPool;
+
+struct Atom
+{
+public:
+
+    uint64_t pos;
+    float mass;
+
+    Atom();
+    Atom(uint64_t p, float m);
+
+    void operator=(Atom other);
+
+    friend Archive& operator<<(Archive& ar, Atom &a);
+    friend Archive& operator>>(Archive& ar, Atom &a);
+
+private:
+
+    // used by the allocator
+    friend class AtomAllocator;
+    friend class AtomPool;
+    uint16_t poolIndex;
+    uint16_t allocatorIndex;
+};
+
+// stores flags for POOL_SIZE indices
+struct IndexFlagSet
+{
+public:
+
+    IndexFlagSet();
+
+    unsigned getFirstFree() const;
+    bool isAnyFree() const;
+    void set(uint64_t n);
+    void release(uint64_t n);
+
+private:
+
+    uint64_t parts[NUM_INDEX_CHUNKS];
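+    // (8 chunks with POOL_SIZE 512 - isAnyFree() hardcodes all 8 of them)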
+};
+
+class AtomPool
+{
+public:
+
+    AtomPool();
+    ~AtomPool();
+
+    Atom* alloc();
+    void free(Atom* a);
+    bool depleted() const;
+
+private:
+
+    Atom* mPool;
+    IndexFlagSet mIndexFlags;
+
+    AtomPool(const AtomPool &pool); // = delete (no c++11)
+    AtomPool& operator=(const AtomPool &pool); // = delete (no c++11)
+};
+
+class AtomAllocator
+{
+public:
+
+    AtomAllocator();
+    ~AtomAllocator();
+
+    Atom* createAtom(uint64_t pos, float mass);
+    void destroyAtom(Atom *a);
+
+private:
+
+    std::vector<AtomPool*> mPools;
+    unsigned mIndex;
+
+    AtomAllocator(const AtomAllocator &pool); // = delete (no c++11)
+    AtomAllocator& operator=(const AtomAllocator &pool); // = delete (no c++11)
+};
+
+#endif
\ No newline at end of file

@@ -30,44 +30,6 @@ static bool vecContains(const std::vector<uint64_t> &vec, uint64_t pos)
     return std::binary_search(vec.begin(), vec.end(), pos);
 }
 
-// used in debug mode to check if vector is always sorted
-bool AtomicDomain::isSorted()
-{
-    for (unsigned i = 1; i < mAtoms.size(); ++i)
-    {
-        if (mAtoms[i]->pos <= mAtoms[i-1]->pos)
-        {
-            gaps_printf("unsorted\n%llu\n%llu\n", mAtoms[i-1]->pos, mAtoms[i]->pos);
-            return false;
-        }
-    }
-    return true;
-}
-
-////////////////////////////////// ATOM ////////////////////////////////////////
-
-Atom::Atom() : pos(0), mass(0.f) {}
-
-Atom::Atom(uint64_t p, float m) : pos(p), mass(m) {}
-
-void Atom::operator=(Atom other)
-{
-    pos = other.pos;
-    mass = other.mass;
-}
-
-Archive& operator<<(Archive &ar, Atom &a)
-{
-    ar << a.pos << a.mass;
-    return ar;
-}
-
-Archive& operator>>(Archive &ar, Atom &a)
-{
-    ar >> a.pos >> a.mass;
-    return ar;
-}
-
 //////////////////////////// ATOM NEIGHBORHOOD /////////////////////////////////
 
 AtomNeighborhood::AtomNeighborhood()
@@ -96,14 +58,6 @@ AtomicDomain::AtomicDomain(uint64_t nBins)
     mDomainLength = binLength * nBins;
 }
 
-AtomicDomain::~AtomicDomain()
-{
-    for (unsigned i = 0; i < mAtoms.size(); ++i)
-    {
-        delete mAtoms[i];
-    }
-}
-
 Atom* AtomicDomain::front()
 {
     GAPS_ASSERT(size() > 0);
@@ -111,33 +65,11 @@ Atom* AtomicDomain::front()
     return mAtoms.front();
 }
 
-Atom* AtomicDomain::randomAtom(GapsRng *rng, const SmallPairedHashSetU64 &moves)
+Atom* AtomicDomain::randomAtom(GapsRng *rng)
 {
     GAPS_ASSERT(size() > 0);
-    GAPS_ASSERT(isSorted());
-
-    //unsigned index = rng->uniform32(0, mAtoms.size() - 1);
-    //gaps_printf("size: %d random index: %d\n", mAtoms.size(), index);
-    //return mAtoms[index];
-
-    uint64_t pos = rng->uniform64(1, mDomainLength);
-    std::vector<Atom*>::iterator it = std::lower_bound(mAtoms.begin(),
-        mAtoms.end(), pos, compareAtomLower);
-    unsigned index = std::distance(mAtoms.begin(), it);
-    unsigned leftIndex = index == 0 ? 0 : index - 1;
-    index = (index == mAtoms.size()) ? index - 1 : index;
-
-    if (moves.contains(mAtoms[leftIndex]->pos) || moves.contains(mAtoms[index]->pos))
-    {
-        return NULL;
-    }
 
-    if (std::abs(mAtoms[leftIndex]->pos - pos) < std::abs(mAtoms[index]->pos - pos))
-    {
-        index = leftIndex;
-    }
-
-    //gaps_printf("size: %d random index: %d\n", mAtoms.size(), index);
+    unsigned index = rng->uniform32(0, mAtoms.size() - 1);
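+    // (sampling an index directly replaces the position-based lower_bound
+    // selection removed above)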
     return mAtoms[index];
 }
 
@@ -145,67 +77,28 @@ AtomNeighborhood AtomicDomain::randomAtomWithNeighbors(GapsRng *rng)
 {
     GAPS_ASSERT(size() > 0);
 
-    //unsigned index = rng->uniform32(0, mAtoms.size() - 1);
-    uint64_t pos = rng->uniform64(1, mDomainLength);
-    std::vector<Atom*>::iterator it = std::lower_bound(mAtoms.begin(),
-        mAtoms.end(), pos, compareAtomLower);
-    unsigned index = std::distance(mAtoms.begin(), it);
-    unsigned leftIndex = index == 0 ? 0 : index - 1;
-    index = (index == mAtoms.size()) ? index - 1 : index;
-    if (std::abs(mAtoms[leftIndex]->pos - pos) < std::abs(mAtoms[index]->pos - pos))
-    {
-        index = leftIndex;
-    }
-
-    //gaps_printf("index: %d, size: %d\n", index, mAtoms.size());
+    unsigned index = rng->uniform32(0, mAtoms.size() - 1);
     Atom* left = (index == 0) ? NULL : mAtoms[index - 1];
     Atom* right = (index == mAtoms.size() - 1) ? NULL : mAtoms[index + 1];
     return AtomNeighborhood(left, mAtoms[index], right);
 }
 
-AtomNeighborhood AtomicDomain::randomAtomWithRightNeighbor(GapsRng *rng,
-const SmallPairedHashSetU64 &moves)
+AtomNeighborhood AtomicDomain::randomAtomWithRightNeighbor(GapsRng *rng)
 {
     GAPS_ASSERT(size() > 0);
 
-    //unsigned index = rng->uniform32(0, mAtoms.size() - 1);
-    uint64_t pos = rng->uniform64(1, mDomainLength);
-    std::vector<Atom*>::iterator it = std::lower_bound(mAtoms.begin(),
-        mAtoms.end(), pos, compareAtomLower);
-    unsigned index = std::distance(mAtoms.begin(), it);
-    unsigned leftIndex = index == 0 ? 0 : index - 1;
-    index = (index == mAtoms.size()) ? index - 1 : index;
-
-    if (moves.contains(mAtoms[leftIndex]->pos) || moves.contains(mAtoms[index]->pos))
-    {
-        return AtomNeighborhood(NULL, NULL, NULL);
-    }
-
-    if (std::abs(mAtoms[leftIndex]->pos - pos) < std::abs(mAtoms[index]->pos - pos))
-    {
-        index = leftIndex;
-    }
-
+    unsigned index = rng->uniform32(0, mAtoms.size() - 1);
     Atom* right = (index == mAtoms.size() - 1) ? NULL : mAtoms[index + 1];
     return AtomNeighborhood(NULL, mAtoms[index], right);
 }
 
-uint64_t AtomicDomain::randomFreePosition(GapsRng *rng,
-const std::vector<uint64_t> &possibleDeaths) const
+uint64_t AtomicDomain::randomFreePosition(GapsRng *rng) const
 {
     uint64_t pos = rng->uniform64(1, mDomainLength);
     while (vecContains(mAtoms, pos))
     {
-        if (vecContains(possibleDeaths, pos))
-        {
-            return 0; // might actually be a free position
-        }
         pos = rng->uniform64(1, mDomainLength);
     }
-    if (vecContains(possibleDeaths, pos))
-    {
-        return 0; // might actually be a free position
-    }
     return pos;
 }
 
@@ -216,23 +109,18 @@ uint64_t AtomicDomain::size() const
 
 Atom* AtomicDomain::insert(uint64_t pos, float mass)
 {
-    Atom *newAtom = new Atom(pos, mass);
-    std::vector<Atom*>::iterator it;
-    #pragma omp critical(AtomicInsertOrErase)
-    {
-        GAPS_ASSERT(!vecContains(mAtoms, pos));
+    GAPS_ASSERT(!vecContains(mAtoms, pos));
 
-        it = std::lower_bound(mAtoms.begin(), mAtoms.end(), pos, compareAtomLower);
-        it = mAtoms.insert(it, newAtom);
+    std::vector<Atom*>::iterator it;
+    it = std::lower_bound(mAtoms.begin(), mAtoms.end(), pos, compareAtomLower);
+    it = mAtoms.insert(it, mAllocator.createAtom(pos, mass));
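+    // (pool-backed atoms are never moved in memory, so the pointer returned
+    // here stays valid across later inserts and erases)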
 
-        GAPS_ASSERT(vecContains(mAtoms, pos));
-    }
+    GAPS_ASSERT(vecContains(mAtoms, pos));
     return *it;
 }
 
 void AtomicDomain::erase(uint64_t pos)
 {
-    Atom *a = NULL;
     #pragma omp critical(AtomicInsertOrErase)
     {
         GAPS_ASSERT(size() > 0);
@@ -240,47 +128,13 @@ void AtomicDomain::erase(uint64_t pos)
 
         std::vector<Atom*>::iterator it = std::lower_bound(mAtoms.begin(),
             mAtoms.end(), pos, compareAtomLower);
-        a = *it;
+        mAllocator.destroyAtom(*it);
         mAtoms.erase(it);
 
         GAPS_ASSERT(!vecContains(mAtoms, pos));
     }
-    delete a;
 }
 
-// for moving across a later birth (already present in domain)
-/*void AtomicDomain::move(uint64_t src, uint64_t dest)
-{
-    #pragma omp critical(AtomicInsertOrErase)
-    {
-        GAPS_ASSERT(size() > 0);
-        GAPS_ASSERT_MSG(vecContains(mAtoms, src), src);
-        GAPS_ASSERT(!vecContains(mAtoms, dest));
-
-        std::vector<Atom*>::iterator it = std::lower_bound(mAtoms.begin(),
-            mAtoms.end(), src, compareAtomLower);
-        unsigned ndx = std::distance(mAtoms.begin(), it);
-        while (ndx + 1 < mAtoms.size() && dest > mAtoms[ndx + 1]->pos)
-        {
-            Atom* temp = mAtoms[ndx];
-            mAtoms[ndx] = mAtoms[ndx + 1];
-            mAtoms[ndx + 1] = temp;
-            ++ndx;
-        }
-        while (ndx > 0 && dest < mAtoms[ndx - 1]->pos)
-        {
-            Atom* temp = mAtoms[ndx];
-            mAtoms[ndx] = mAtoms[ndx - 1];
-            mAtoms[ndx - 1] = temp;
-            --ndx;
-        }
-        mAtoms[ndx]->pos = dest;
-
-        GAPS_ASSERT(!vecContains(mAtoms, src));
-        GAPS_ASSERT(vecContains(mAtoms, dest));
-    }
-}*/
-
 Archive& operator<<(Archive &ar, AtomicDomain &domain)
 {
     ar << domain.mDomainLength << domain.mAtoms.size();
@@ -304,3 +158,34 @@ Archive& operator>>(Archive &ar, AtomicDomain &domain)
     }
     return ar;
 }
+
+////////////////////// ATOMIC DOMAIN DEBUG FUNCTIONS ///////////////////////////
+
+#ifdef GAPS_DEBUG
+
+std::vector<Atom*>::iterator AtomicDomain::begin()
+{
+    return mAtoms.begin();
+}
+
+std::vector<Atom*>::iterator AtomicDomain::end()
+{
+    return mAtoms.end();
+}
+
+// used in debug mode to check if vector is always sorted
+bool AtomicDomain::isSorted()
+{
+    for (unsigned i = 1; i < mAtoms.size(); ++i)
+    {
+        if (mAtoms[i]->pos <= mAtoms[i-1]->pos)
+        {
+            gaps_printf("unsorted\n%llu\n%llu\n", mAtoms[i-1]->pos, mAtoms[i]->pos);
+            return false;
+        }
+    }
+    return true;
+}
+
+#endif
+

@@ -1,26 +1,14 @@
 #ifndef __COGAPS_ATOMIC_DOMAIN_H__
 #define __COGAPS_ATOMIC_DOMAIN_H__
 
+#include "AtomAllocator.h"
 #include "data_structures/HashSets.h"
 #include "math/Random.h"
+#include "math/SIMD.h"
 #include "utils/Archive.h"
 
 #include <vector>
 
-struct Atom
-{
-    uint64_t pos;
-    float mass;
-
-    Atom();
-    Atom(uint64_t p, float m);
-
-    void operator=(Atom other);
-
-    friend Archive& operator<<(Archive& ar, Atom &a);
-    friend Archive& operator>>(Archive& ar, Atom &a);
-};
-
 struct AtomNeighborhood
 {
     Atom* center;
@@ -41,34 +29,28 @@ class AtomicDomain
 public:
 
     AtomicDomain(uint64_t nBins);
-    ~AtomicDomain();
 
     // access atoms
     Atom* front();
-    Atom* randomAtom(GapsRng *rng, const SmallPairedHashSetU64 &moves);
+    Atom* randomAtom(GapsRng *rng);
     AtomNeighborhood randomAtomWithNeighbors(GapsRng *rng);
-    AtomNeighborhood randomAtomWithRightNeighbor(GapsRng *rng, const SmallPairedHashSetU64 &moves);
+    AtomNeighborhood randomAtomWithRightNeighbor(GapsRng *rng);
 
-    Atom* getLeftNeighbor(uint64_t pos);
-    Atom* getRightNeighbor(uint64_t pos);
-
-    uint64_t randomFreePosition(GapsRng *rng,
-        const std::vector<uint64_t> &possibleDeaths) const;
+    uint64_t randomFreePosition(GapsRng *rng) const;
     uint64_t size() const;
 
-    // these need to happen concurrently without invalidating pointers
+    // this needs to happen concurrently without invalidating pointers
     void erase(uint64_t pos);
-    //void move(uint64_t src, uint64_t dest);
-
-    // iterators
-    std::vector<Atom*>::iterator begin() { return mAtoms.begin(); }
-    std::vector<Atom*>::iterator end() { return mAtoms.end(); }
-
-    bool isSorted();
 
     // serialization
     friend Archive& operator<<(Archive &ar, AtomicDomain &domain);
     friend Archive& operator>>(Archive &ar, AtomicDomain &domain);
+
+#ifdef GAPS_DEBUG
+    std::vector<Atom*>::iterator begin() { return mAtoms.begin(); }
+    std::vector<Atom*>::iterator end() { return mAtoms.end(); }
+    bool isSorted();
+#endif
 
 #ifndef GAPS_INTERNAL_TESTS
 private:
@@ -81,8 +63,9 @@ private:
     // size of atomic domain to ensure all bins are equal length
     uint64_t mDomainLength;
 
-    // domain storage, sorted vector
+    // domain storage, sorted vector of pointers to atoms created by allocator
    std::vector<Atom*> mAtoms;
+    AtomAllocator mAllocator;
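+    // (the allocator owns the pool memory backing mAtoms, so atoms are
+    // reclaimed in bulk rather than deleted one by one)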
 };
 
-#endif // __COGAPS_ATOMIC_DOMAIN_H__
\ No newline at end of file
+#endif // __COGAPS_ATOMIC_DOMAIN_H__

@@ -94,10 +94,6 @@ GapsResult GapsRunner::run(bool printThreads)
     }
 #endif
 
-#ifdef __GAPS_DEBUG_NO_QUEUE__
-    gaps_printf("Running without a queue\n");
-#endif
-
     // cascade through phases, allows algorithm to be resumed in either phase
     GAPS_ASSERT(mPhase == 'C' || mPhase == 'S');
     switch (mPhase)

@@ -73,11 +73,7 @@ void GibbsSampler::update(unsigned nSteps, unsigned nCores)
     while (n < nSteps)
     {
         // populate queue, prepare domain for this queue
-        #ifdef __GAPS_DEBUG_NO_QUEUE__
-        mQueue.populate(mDomain, 1);
-        #else
         mQueue.populate(mDomain, nSteps - n);
-        #endif
         n += mQueue.size();
 
         // process all proposed updates
@@ -88,6 +84,7 @@ void GibbsSampler::update(unsigned nSteps, unsigned nCores)
         }
         mQueue.clear();
     }
+
     GAPS_ASSERT(internallyConsistent());
     GAPS_ASSERT(mDomain.isSorted());
 }
@@ -142,7 +139,7 @@ void GibbsSampler::death(const AtomicProposal &prop)
     AlphaParameters alpha = alphaParametersWithChange(prop.r1, prop.c1,
         -prop.atom1->mass);
 
-    // try to calculate rebirth mass using gibbs distribution, otherwise exponetial
+    // try to calculate rebirth mass using gibbs distribution, otherwise use original
     float rebirthMass = prop.atom1->mass;
     if (canUseGibbs(prop.c1))
     {

@@ -112,10 +112,6 @@ float ProposalQueue::deathProb(double nAtoms) const
 
 bool ProposalQueue::makeProposal(AtomicDomain &domain)
 {
-    mU1 = mUseCachedRng ? mU1 : mRng.uniform();
-    mU2 = mUseCachedRng ? mU2 : mRng.uniform();
-    mUseCachedRng = false;
-
     if (mMinAtoms < 2 && mMaxAtoms >= 2)
     {
         return false; // special indeterminate case
@@ -126,6 +122,10 @@ bool ProposalQueue::makeProposal(AtomicDomain &domain)
         return birth(domain); // always birth when no atoms exist
     }
 
+    mU1 = mUseCachedRng ? mU1 : mRng.uniform();
+    mU2 = mUseCachedRng ? mU2 : mRng.uniform();
+    mUseCachedRng = false;
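+    // (drawn after the early returns above, presumably so proposals that
+    // bail out never consume these uniforms)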
+
     float lowerBound = deathProb(static_cast<double>(mMinAtoms));
     float upperBound = deathProb(static_cast<double>(mMaxAtoms));
 
@@ -147,14 +147,7 @@ bool ProposalQueue::makeProposal(AtomicDomain &domain)
 bool ProposalQueue::birth(AtomicDomain &domain)
 {
     AtomicProposal prop('B');
-    uint64_t pos = domain.randomFreePosition(&(prop.rng), mUsedAtoms.vec());
-
-    if (pos == 0)
-    {
-        DEBUG_PING // want to notify since this event should have near 0 probability
-        GapsRng::rollBackOnce(); // ensure same proposal next time
-        return false; // atom conflict, might have open spot if atom moves/dies
-    }
+    uint64_t pos = domain.randomFreePosition(&(prop.rng));
 
     if (mProposedMoves.overlap(pos))
     {
@@ -181,14 +174,7 @@ bool ProposalQueue::birth(AtomicDomain &domain)
 bool ProposalQueue::death(AtomicDomain &domain)
 {
     AtomicProposal prop('D');
-    prop.atom1 = domain.randomAtom(&(prop.rng), mProposedMoves);
-
-    if (prop.atom1 == NULL)
-    {
-        GapsRng::rollBackOnce(); // ensure same proposal next time
-        return false; // can't select atom
-    }
-
+    prop.atom1 = domain.randomAtom(&(prop.rng));
     prop.r1 = (prop.atom1->pos / mBinLength) / mNumCols;
     prop.c1 = (prop.atom1->pos / mBinLength) % mNumCols;
 
@@ -249,14 +235,7 @@ bool ProposalQueue::move(AtomicDomain &domain)
 bool ProposalQueue::exchange(AtomicDomain &domain)
 {
     AtomicProposal prop('E');
-    AtomNeighborhood hood = domain.randomAtomWithRightNeighbor(&(prop.rng), mProposedMoves);
-
-    if (hood.center == NULL)
-    {
-        GapsRng::rollBackOnce(); // ensure same proposal next time
-        return false; // can't select atom
-    }
-
+    AtomNeighborhood hood = domain.randomAtomWithRightNeighbor(&(prop.rng));
     prop.atom1 = hood.center;
     prop.atom2 = hood.hasRight() ? hood.right : domain.front();
     prop.r1 = (prop.atom1->pos / mBinLength) / mNumCols;