1 /* This file is part of Metaproxy.
2 Copyright (C) 2005-2012 Index Data
4 Metaproxy is free software; you can redistribute it and/or modify it under
5 the terms of the GNU General Public License as published by the Free
6 Software Foundation; either version 2, or (at your option) any later
9 Metaproxy is distributed in the hope that it will be useful, but WITHOUT ANY
10 WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 #include <boost/thread/thread.hpp>
30 #include <boost/thread/mutex.hpp>
31 #include <boost/thread/condition.hpp>
39 #include <yazpp/socket-observer.h>
42 #include "thread_pool_observer.hpp"
45 namespace metaproxy_1 {
// Thread entry functor handed to each boost::thread: holds a back
// pointer to the owning observer. NOTE(review): the body of
// operator() is truncated in this view — presumably it invokes
// m_s->run(...); confirm against the full source.
46 class ThreadPoolSocketObserver::Worker {
48 Worker(ThreadPoolSocketObserver *s) : m_s(s) {};
49 ThreadPoolSocketObserver *m_s;
50 void operator() (void) {
// Private implementation (pimpl) state for ThreadPoolSocketObserver.
// NOTE(review): several lines are elided in this view (destructor
// declaration, the pipe member, the stop flag, access specifiers and
// the closing brace) — confirm against the full source.
55 class ThreadPoolSocketObserver::Rep : public boost::noncopyable {
56 friend class ThreadPoolSocketObserver;
58 Rep(yazpp_1::ISocketObservable *obs);
61 yazpp_1::ISocketObservable *m_socketObservable;
63 boost::thread_group m_thrds;
// m_mutex_input_data guards m_input and the thread counters; the two
// condition variables signal "input available" (consumed by workers)
// and "queue has room" (consumed by put()) respectively.
64 boost::mutex m_mutex_input_data;
65 boost::condition m_cond_input_data;
66 boost::condition m_cond_input_full;
// Separate lock for the output queue so result delivery does not
// contend with input submission.
67 boost::mutex m_mutex_output_data;
68 std::deque<IThreadPoolMsg *> m_input;
69 std::deque<IThreadPoolMsg *> m_output;
71 unsigned m_no_threads;
72 unsigned m_no_threads_waiting;
// Cap on queued input per worker thread; put() blocks once the input
// queue reaches m_no_threads * queue_size_per_thread entries.
// NOTE(review): may be a namespace-scope constant rather than a class
// member in the full source — intervening lines are elided.
74 const unsigned int queue_size_per_thread = 64;
79 using namespace yazpp_1;
80 using namespace metaproxy_1;
// Rep constructor: record the socket observable and construct the
// internal wake-up pipe. NOTE(review): the meaning of the 9123
// argument depends on the (unseen) Pipe helper — confirm against
// yazpp; the constructor body is elided in this view.
82 ThreadPoolSocketObserver::Rep::Rep(yazpp_1::ISocketObservable *obs)
83 : m_socketObservable(obs), m_pipe(9123)
// Rep destructor (body elided in this view — presumably empty, with
// members released via RAII; confirm against the full source).
87 ThreadPoolSocketObserver::Rep::~Rep()
// Out-of-line virtual destructor for the message interface, anchoring
// its vtable in this translation unit (body elided in this view).
91 IThreadPoolMsg::~IThreadPoolMsg()
// Constructor: register the read end of the internal pipe with the
// socket observable so socketNotify() fires on the event thread when
// a worker signals completion, then spawn the worker threads.
// NOTE(review): braces, the declaration of i/w, and the loop body are
// elided in this view — confirm against the full source.
96 ThreadPoolSocketObserver::ThreadPoolSocketObserver(
97 yazpp_1::ISocketObservable *obs, int no_threads)
100 obs->addObserver(m_p->m_pipe.read_fd(), this);
101 obs->maskObserver(this, SOCKET_OBSERVE_READ);
103 m_p->m_stop_flag = false;
104 m_p->m_no_threads = no_threads;
105 m_p->m_no_threads_waiting = 0;
// One Worker functor per thread, each added to the thread group so
// the destructor can join them all.
107 for (i = 0; i<no_threads; i++)
110 m_p->m_thrds.add_thread(new boost::thread(w));
// Destructor: set the stop flag under the input lock and wake every
// worker, then join them all and deregister from the observable.
// NOTE(review): the braces scoping the lock (so it is released before
// join_all()) are elided in this view — confirm the lock does not
// remain held across the join.
114 ThreadPoolSocketObserver::~ThreadPoolSocketObserver()
117 boost::mutex::scoped_lock input_lock(m_p->m_mutex_input_data);
118 m_p->m_stop_flag = true;
119 m_p->m_cond_input_data.notify_all();
121 m_p->m_thrds.join_all();
123 m_p->m_socketObservable->deleteObserver(this);
// Event-thread callback: when the wake-up pipe becomes readable,
// drain a byte, pop a completed message off the output queue and
// deliver its result on this (the event) thread.
// NOTE(review): conditional-compilation lines (recv vs read variants,
// presumably WIN32 vs POSIX), buffer declaration, braces and a
// possible surrounding loop are elided in this view.
126 void ThreadPoolSocketObserver::socketNotify(int event)
128 if (event & SOCKET_OBSERVE_READ)
132 recv(m_p->m_pipe.read_fd(), buf, 1, 0);
134 ssize_t r = read(m_p->m_pipe.read_fd(), buf, 1);
// Log (but do not abort on) a failed or empty pipe read.
137 if (r == (ssize_t) (-1))
138 yaz_log(YLOG_WARN|YLOG_ERRNO,
139 "ThreadPoolSocketObserver::socketNotify. read fail");
142 "ThreadPoolSocketObserver::socketNotify. read returned 0");
// Take one finished message off the output queue under its own lock.
147 boost::mutex::scoped_lock output_lock(m_p->m_mutex_output_data);
148 out = m_p->m_output.front();
149 m_p->m_output.pop_front();
// Build a small status string (busy/total threads and queue depths)
// while holding the input lock, then hand it to the message's
// result() callback.
155 std::ostringstream os;
157 boost::mutex::scoped_lock input_lock(m_p->m_mutex_input_data);
158 os << "tbusy/total " <<
159 m_p->m_no_threads - m_p->m_no_threads_waiting <<
160 "/" << m_p->m_no_threads
161 << " queue in/out " << m_p->m_input.size() << "/"
162 << m_p->m_output.size();
164 out->result(os.str().c_str());
// Report the number of busy workers and the total worker count via
// the two out-parameters. NOTE(review): m_no_threads_waiting appears
// to be read here without m_mutex_input_data held (braces are elided
// in this view) — confirm callers tolerate a slightly stale value.
169 void ThreadPoolSocketObserver::get_thread_info(int &tbusy, int &total)
171 tbusy = m_p->m_no_threads - m_p->m_no_threads_waiting;
172 total = m_p->m_no_threads;
// Worker-thread main routine: wait for an input message, handle it
// outside any lock, queue the reply and poke the wake-up pipe so the
// event thread delivers it. NOTE(review): the enclosing loop, braces,
// the break on stop, and the send/write conditional-compilation lines
// are elided in this view — confirm against the full source.
175 void ThreadPoolSocketObserver::run(void *p)
179 IThreadPoolMsg *in = 0;
181 boost::mutex::scoped_lock input_lock(m_p->m_mutex_input_data);
// Account this thread as waiting while blocked on the condition so
// get_thread_info()/socketNotify() can report busy vs idle counts.
182 m_p->m_no_threads_waiting++;
183 while (!m_p->m_stop_flag && m_p->m_input.size() == 0)
184 m_p->m_cond_input_data.wait(input_lock);
185 m_p->m_no_threads_waiting--;
186 if (m_p->m_stop_flag)
189 in = m_p->m_input.front();
190 m_p->m_input.pop_front();
// A slot just freed up: wake producers blocked in put().
191 m_p->m_cond_input_full.notify_all();
// handle() runs with no pool lock held; it returns the reply message.
193 IThreadPoolMsg *out = in->handle();
195 boost::mutex::scoped_lock output_lock(m_p->m_mutex_output_data);
196 m_p->m_output.push_back(out);
// Write one byte to the pipe to trigger socketNotify() on the event
// thread (send() is presumably the WIN32 variant; the #ifdef lines
// are elided in this view).
198 send(m_p->m_pipe.write_fd(), "", 1, 0);
200 ssize_t r = write(m_p->m_pipe.write_fd(), "", 1);
// Log (but do not abort on) a failed or short pipe write.
203 if (r == (ssize_t) (-1))
204 yaz_log(YLOG_WARN|YLOG_ERRNO,
205 "ThreadPoolSocketObserver::run. write fail");
208 "ThreadPoolSocketObserver::run. write returned 0");
// Remove queued (not yet handled) input messages associated with
// 'info', letting each message decide via its cleanup(info) hook;
// matching entries are erased from the input queue.
// NOTE(review): the else/advance (++it) branch of the loop and the
// use, if any, of parameter 'm' are elided in this view — confirm
// against the full source.
215 void ThreadPoolSocketObserver::cleanup(IThreadPoolMsg *m, void *info)
217 boost::mutex::scoped_lock input_lock(m_p->m_mutex_input_data);
219 std::deque<IThreadPoolMsg *>::iterator it = m_p->m_input.begin();
220 while (it != m_p->m_input.end())
222 if ((*it)->cleanup(info))
// erase() returns the iterator to the next element, keeping the
// traversal valid after removal.
223 it = m_p->m_input.erase(it);
// Producer side: enqueue a message for the pool, blocking while the
// input queue is at capacity (m_no_threads * queue_size_per_thread),
// then wake exactly one idle worker. Ownership of 'm' passes to the
// pool until its reply is delivered via socketNotify().
229 void ThreadPoolSocketObserver::put(IThreadPoolMsg *m)
231 boost::mutex::scoped_lock input_lock(m_p->m_mutex_input_data);
// Bounded-queue back-pressure: wait on m_cond_input_full until a
// worker pops an entry and notifies.
233 while (m_p->m_input.size() >= m_p->m_no_threads * queue_size_per_thread)
234 m_p->m_cond_input_full.wait(input_lock);
235 m_p->m_input.push_back(m);
236 m_p->m_cond_input_data.notify_one();
242 * c-file-style: "Stroustrup"
243 * indent-tabs-mode: nil
245 * vim: shiftwidth=4 tabstop=8 expandtab