diff --git a/Doxyfile b/Doxyfile
index 28da650..2bfcdf5 100644
--- a/Doxyfile
+++ b/Doxyfile
@@ -1386,7 +1386,7 @@ MAN_LINKS = NO
 # generate an XML file that captures the structure of
 # the code including all documentation.
 
-GENERATE_XML = NO
+GENERATE_XML = YES
 
 # The XML_OUTPUT tag is used to specify where the XML pages will be put.
 # If a relative path is entered the value of OUTPUT_DIRECTORY will be
diff --git a/doc2confluence.py b/doc2confluence.py
new file mode 100755
index 0000000..b16000e
--- /dev/null
+++ b/doc2confluence.py
@@ -0,0 +1,261 @@
+#!/usr/bin/env python
+# encoding: utf-8
+"""
+Utility script to import doxygen docs into confluence
+
+Original Author(s): Richard Bateman
+Created: 18 October 2009
+License: Dual license model; choose one of two:
+         New BSD License
+         http://www.opensource.org/licenses/bsd-license.php
+         - or -
+         GNU Lesser General Public License, version 2.1
+         http://www.gnu.org/licenses/lgpl-2.1.html
+
+Copyright 2009 the Firebreath development team
+"""
+import os, sys, SOAPpy
+from xml.dom import minidom
+from itertools import izip
+
+nameCount = {}
+
+class Doxygen2Confluence:
+    nameCount = {}
+    inputHtmlPath = os.path.join("docs", "html")
+    outputHtmlPath = os.path.join("docs", "patched")
+    inputList = {}
+    pathMap = {}
+    baseUrl = "/confluence/display/HTC/%s"
+    classDocsUrl = "http://wiki.my-ho.st/"
+    url = "http://wiki.my-ho.st/confluence/rpc/soap-axis/confluenceservice-v2?wsdl"
+    server = SOAPpy.SOAPProxy(url)
+    rpc = SOAPpy.WSDL.Proxy(url)
+    token = ""
+    space = "HTC"
+    topPages = {
+        "class": "7012748",
+        "struct": "7012750",
+        "namespace": "7012752",
+        "file": "7012754",
+        "typedef": "7012756",
+        "function": "7012760",
+        "enum": "7012758",
+        "example": "7012762",
+    }
+    parents = {}
+    createdPages = []
+    username = ""
+    password = ""
+
+    def login(self):
+        self.token = self.rpc.login(self.username, self.password)
+
+    def __init__(self, username, password):
+        SOAPpy.Parser._parseSOAP = self.confluence_soap_parser
+        self.username = username
+        self.password = password
+        self.login()
+
+    def getName(self, name):
+        count = 1
+        retVal = name.replace("::", " ")
+        if name in self.nameCount:
+            count = self.nameCount[name]
+            count = count + 1
+            retVal = "%s (%s)" % (name, count)
+
+        self.nameCount[name] = count
+        return retVal.replace("<", "(").replace(">", ")").replace("/", " ")
+
+    def makeFirstPageInConfluence(self, pageId, targetPageId):
+        children = self.rpc.getChildren(self.token, SOAPpy.Types.longType(long(pageId)))
+        if len(children) and children[0]["id"] != targetPageId:
+            print "Moving %s to before %s" % (targetPageId, children[0]["id"])
+            self.rpc.movePage(self.token, SOAPpy.Types.longType(long(targetPageId)), SOAPpy.Types.longType(long(children[0]["id"])), "above")
+
+    def exportToConfluence(self, refId, pageName, kind):
+        try:
+            page = self.rpc.getPage(self.token, self.space, pageName)
+        except:
+            try:
+                self.login()
+                page = self.rpc.getPage(self.token, self.space, pageName)
+            except:
+                page = {"space": self.space, "title": pageName}
+
+        if kind == "file":
+            filename = "%s_source.html" % refId
+        else:
+            filename = "%s.html" % refId
+
+        npage = {
+            "content": "{doxygen-init}{html-include:url=http://wiki.my-ho.st/doco/%s/%s}" % (self.space, filename),
+            "space": page["space"],
+            "title": page["title"],
+        }
+
+        if hasattr(page, 'id'):
+            npage["id"] = SOAPpy.Types.longType(long(page["id"]))
+            npage["parentId"] = SOAPpy.Types.longType(long(self.parents[refId]))
+            npage["version"] = SOAPpy.Types.intType(int(page["version"]))
+
+        n = 0
+        while n < 10:
+            try:
+                npage["content"] = self.rpc.convertWikiToStorageFormat(self.token, npage['content'])
+                npage = self.rpc.storePage(self.token, npage)
+                self.createdPages.append(npage["id"])
+                self.rpc.setContentPermissions(self.token, SOAPpy.Types.longType(long(npage["id"])), "Edit", [ {'groupName': 'confluence-administrators', 'type': 'Edit'} ])
+                break
+            except Exception as ex:
+                self.login()
+                print type(ex)
+                print ex.args
+                print ex
+                n += 1
+
+        return npage["id"]
+
+    def cleanConfluence(self):
+        for kind, id in self.topPages.items():
+            print "Scanning pages for %s (id %s)" % (kind, id)
+            pages = self.rpc.getDescendents(self.token, SOAPpy.Types.longType(long(id)))
+            for page in pages:
+                if (page["id"] not in self.createdPages) and (page["id"] not in self.topPages.values()):
+                    print "Removing defunct page: %s (%s)" % (page["title"], page["id"])
+                    self.rpc.removePage(self.token, SOAPpy.Types.longType(long(page["id"])))
+
+    def processDirectory(self, path):
+        xml = minidom.parse("docs/xml/index.xml")
+
+        compounds = xml.documentElement.getElementsByTagName("compound")
+        refidMap = {}
+        Info = {}
+        for com in compounds:
+            refid = com.getAttribute("refid")
+            kind = com.getAttribute("kind")
+            compoundName = com.getElementsByTagName("name")[0].firstChild.wholeText
+            realName = self.getName("%s %s" % (kind, compoundName.replace("::", " ")))
+            if os.path.exists(os.path.join(path, "%s-members.html" % refid)):
+                refidMap["%s-members.html" % refid] = self.baseUrl % (realName + " Members")
+            filename = "%s.html" % refid
+            if kind == "file":
+                filename = "%s_source.html" % refid
+            if os.path.exists(os.path.join(path, filename)):
+                Info[refid] = {}
+                Info[refid]["kind"] = kind
+                Info[refid]["name"] = realName
+                Info[refid]["members"] = {}
+                refidMap[filename] = self.baseUrl % realName
+                if kind == "file":
+                    print "%s => %s" % (filename, self.baseUrl % realName)
+                    continue
+                for mem in com.getElementsByTagName("member"):
+                    memName = mem.getElementsByTagName("name")[0].firstChild.wholeText
+                    memRefId = mem.getAttribute("refid")
+                    memRefId = memRefId[0:memRefId.rindex("_")]
+                    memKind = mem.getAttribute("kind")
+                    if memKind == "enumvalue":
+                        continue
+
+                    if os.path.exists(os.path.join(path, memRefId + ".html")):
+                        if kind == "namespace":
+                            localName = self.getName("%s %s %s" % (memKind, compoundName, memName))
+                        else:
+                            localName = self.getName(Info[refid]["name"] + " " + memName)
+                        refidMap["%s.html" % memRefId] = self.baseUrl % localName
+                        Info[refid]["members"][memRefId] = {}
+                        Info[refid]["members"][memRefId]["kind"] = memKind
+                        Info[refid]["members"][memRefId]["name"] = localName
+        self.inputList = Info
+        self.pathMap = refidMap
+
+    def processFile(self, filename, inPath, outPath):
+        f = open(os.path.join(inPath, filename), "r")
+        fileText = f.read()
+        f.close()
+
+        for id, url in izip(self.pathMap.keys(), self.pathMap.values()):
+            print "Changing %s to %s" % (id, url)
+            try:
+                fileText = fileText.replace(id, url)
+            except UnicodeDecodeError:
+                fileText = fileText.replace(id.encode('utf8'), url.encode('utf8'))
+        fileText = fileText.replace(r'img src="', r'img src="http://wiki.my-ho.st/doco/%s/' % self.space)
+        fileText = fileText.replace(r'img class="footer" src="', r'img class="footer" src="http://wiki.my-ho.st/doco/')
+        nf = open(os.path.join(outPath, filename), "w")
+        nf.write(fileText)
+        nf.close()
+
+    def writeNewFiles(self, inPath, outPath):
+        if not os.path.exists(outPath):
+            os.mkdir(outPath)
+
+        self.processFile("annotated.html", inPath, outPath)
+        self.processFile("hierarchy.html", inPath, outPath)
+        self.processFile("files.html", inPath, outPath)
+        self.processFile("namespaces.html", inPath, outPath)
+        self.processFile("classes.html", inPath, outPath)
+        self.processFile("index.html", inPath, outPath)
+        self.processFile("examples.html", inPath, outPath)
+        self.processFile("functions.html", inPath, outPath)
+        # Now we're going to load the files, process them, and write them to the output directory
+        for refid, item in self.inputList.items():
+            filename = "%s.html" % refid
+            if item["kind"] == "file":
+                filename = "%s_source.html" % refid
+            if os.path.exists(os.path.join(inPath, "%s-members.html" % refid)):
+                self.processFile("%s-members.html" % refid, inPath, outPath)
+            #print "Opening file %s" % filename
+            self.processFile(filename, inPath, outPath)
+            for memid, mem in item["members"].items():
+                #print "Member: %s" % memid
+                self.processFile("%s.html" % memid, inPath, outPath)
+
+    def begin(self):
+        self.processDirectory(self.inputHtmlPath)
+        self.writeNewFiles(self.inputHtmlPath, self.outputHtmlPath)
+
+        for refid, item in self.inputList.items():
+            parentId = None
+            if item["kind"] in self.topPages:
+                parentId = self.topPages[item["kind"]]
+            else:
+                print "Could not find %s in " % item["kind"], self.topPages
+                continue
+            self.parents[refid] = parentId
+            print "Exporting %s to confluence..." % item["name"]
+            pageId = self.exportToConfluence(refid, item["name"], item["kind"])
+            for memid, mem in item["members"].items():
+                #print "Exporting %s to confluence..." % mem["name"]
+                if item["kind"] == "namespace" and mem["kind"] in self.topPages:
+                    self.parents[memid] = self.topPages[mem["kind"]]
+                else:
+                    self.parents[memid] = pageId
+                self.exportToConfluence(memid, mem["name"], mem["kind"])
+            if os.path.exists(os.path.join(self.inputHtmlPath, "%s-members.html" % refid)):
+                self.parents["%s-members" % refid] = pageId
+                membersPageId = self.exportToConfluence("%s-members" % refid, "%s Members" % item["name"], "members")
+                self.makeFirstPageInConfluence(pageId, membersPageId)
+        self.cleanConfluence()
+
+    # This parser is needed because of this bug: https://jira.atlassian.com/browse/CONF-6720
+    # once that bug is fixed this parser can be retired
+    def confluence_soap_parser(self, xml_str, rules=None, parser=SOAPpy.Parser._parseSOAP):
+        attribute = 'xsi:type="soapenc:Array"'
+        xml_str = xml_str.replace('%s %s' % (attribute, attribute), attribute)
+        return parser(xml_str, rules=rules)
+
+
+def Main():
+    """
+    Parse the commandline and execute the appropriate actions.
+ """ + a = Doxygen2Confluence(sys.argv[1], sys.argv[2]) + a.begin() + +if __name__ == "__main__": + Main() + diff --git a/http_request.cpp b/http_request.cpp index f98220a..6715581 100644 --- a/http_request.cpp +++ b/http_request.cpp @@ -1,7 +1,24 @@ #include #include +#include +#include +#include +#include +#include #include "http_request.h" +using namespace DynamX::HttpClient; +using namespace DynamX::HttpClient::Logging; +using namespace boost::archive::iterators; + +typedef insert_linebreaks >, 72 > it_base64_t; + +std::string Base64Encode(std::string s) { + std::string base64(it_base64_t(s.begin()),it_base64_t(s.end())); + base64.append((3-s.length()%3)%3, '='); + return base64; +} + std::string get_env(std::string const& name) { @@ -113,8 +130,15 @@ void http_request::send() } } request += ' ' + this->version + "\r\n"; + /* send the headers if needed */ for (std::map::iterator header = this->headers.begin(); header != this->headers.end(); ++header) request += header->first + ": " + header->second + "\r\n"; + /* if we have a username, password stored, send that */ + if (this->httpauth.first.length()>0) + request += "Authorization: Basic " + Base64Encode(this->httpauth.first +":"+this->httpauth.second) + "\r\n"; + if ((this->http_proxy == HTTP_PROXY) && (this->proxyauth.first.length() > 0)) + request += "Proxy-Authorization: Basic " + Base64Encode(this->proxyauth.first + ":" + this->proxyauth.second) + "\r\n"; + request += "\r\n" + this->body; LogTrace() << "Sending: " << request; this->socksend(request); @@ -173,7 +197,7 @@ void http_request::send(std::string absolute_url) this->http_type = SSL_HTTPS; if (port.empty()) port = "443"; - + char *data = new char[1024]; this->host = url_parts[0] + "://" + url_parts[1] + ":" + (url_parts[2].empty() ? "443" : url_parts[2]); this->targeturl = url_parts[1] + ":" + port; @@ -231,10 +255,13 @@ void http_request::send(std::string absolute_url) /* if we are connecting through a proxy.... 
*/ if (this->http_proxy == HTTP_PROXY) { /* send our Proxy Headers */ - std::string proxycmd = "CONNECT " + this->targeturl + " " + this->version + "\r\n\r\n"; - char *data = new char[1024]; + std::string proxycmd = "CONNECT " + this->targeturl + " " + this->version + "\r\n"; + if (this->proxyauth.first.length() > 0) + proxycmd += "Proxy-Authorization: Basic " + Base64Encode(this->proxyauth.first + ":" + this->proxyauth.second) + "\r\n"; + proxycmd += "\r\n"; LogTrace() << "Proxy Command: " << proxycmd; this->sslsocket.next_layer().send(boost::asio::buffer(proxycmd.c_str(), proxycmd.length())); + char *data = new char[1024]; while (this->sslsocket.next_layer().read_some(boost::asio::buffer(data, 1024))) { std::string proxyresponse(data); //typedef split_vector; @@ -489,8 +516,6 @@ http_response *http_request::connect(std::string url) return this->response; } - - bool http_request::Starttransfer(std::string url) { boost::packaged_task TransferStatus(boost::bind(&http_request::connect, this, url)); this->Status = TransferStatus.get_future(); @@ -505,3 +530,20 @@ void http_request::callback(http_response *res) { if (this->CallbackFunction) CallbackFunction(res); } + +bool http_request::setHeader(std::string name, std::string value) { + if (this->headers.find(name) == this->headers.end()) + return false; + std::pair::iterator,bool> ret = this->headers.insert(std::pair(name, value)); + return ret.second; +} +bool http_request::setHTTPAuth(std::string username, std::string password) { + this->httpauth.first = username; + this->httpauth.second = password; + return true; +} +bool http_request::setProxyAuth(std::string username, std::string password) { + this->proxyauth.first = username; + this->proxyauth.second = password; + return true; +} diff --git a/http_request.h b/http_request.h index 07c0110..6dcd295 100644 --- a/http_request.h +++ b/http_request.h @@ -10,6 +10,8 @@ #include #include + + #include #include #include @@ -18,10 +20,12 @@ #include "http_response.h" - /** @file */ +namespace DynamX { +namespace HttpClient { + /*! \mainpage HTTP Client * * \section introduction Introduction @@ -75,8 +79,16 @@ * \brief the Boost Date/Time Classes * * The Boost Date and Time Class Documentation can be read here: http://www.boost.org/libs/datetime/ + * + * \namespace DynamX::HttpClient + * \brief the Main Namespace for the HTTPClient Library + * + * \namespace DynamX::HttpClient::Logging + * \brief the Logging Namespace for the Library */ +class http_response; + /*! \brief This is the main Class that Controls all downloads. @@ -125,8 +137,6 @@ * \endcode */ -class http_response; - class http_request { public: @@ -142,7 +152,6 @@ public: * * Destruct the http_request Class */ - ~http_request(); /*! \brief Reset the Requests to prepare for a new transfer * @@ -179,6 +188,34 @@ public: * the http_response class with the results of the request. */ boost::unique_future Status; + /*! \brief set a Header to send to the Server when a request is made + * + * a function to set a custom header to send to the Server when a request is made + * + * \param[in] name the name of the header to send + * \param[in] value the value of the header to send + * \return a bool indicating success + */ + bool setHeader(std::string name, std::string value); + /*! 
diff --git a/http_response.cpp b/http_response.cpp
index ca56ea5..560fb24 100644
--- a/http_response.cpp
+++ b/http_response.cpp
@@ -2,6 +2,8 @@
 
 #include
 
+using namespace DynamX::HttpClient;
+
 http_response::http_response(): body_size(0)
 {
     this->reset();
@@ -97,4 +99,4 @@ size_t http_response::getProgress() {
     boost::interprocess::scoped_lock(this->TLock);
     return this->progress;
 }
-}
+
diff --git a/http_response.h b/http_response.h
index 14c6a38..32a6844 100644
--- a/http_response.h
+++ b/http_response.h
@@ -3,14 +3,15 @@
 
 #include
 
-
-
 #include
 #include
 
 #include "http_request.h"
 
 /** @file */
 
+namespace DynamX {
+namespace HttpClient {
+
 /*! \brief Basic Memory Only HTTP Result Class
  *
  * This is a basic http_response class that is used to store the results of a HTTP Transfer in memory and pass the results back to the client
@@ -81,7 +82,7 @@ public:
    std::map<std::string, std::string>::iterator getHeadersEnd();
    /*! \brief Return the value associated with a Header
     *
-    * This returns the value associated with a particular header who´s name is passed as the first param. Returns empty if no such header exists
+    * This returns the value associated with a particular header whose name is passed as the first param. Returns empty if no such header exists
     *
     * @param[in] name The header value to return
     * @return a std::string containing the header value, or empty if no such header exists.
@@ -197,4 +198,8 @@ private:
    boost::mutex TLock;
 };
 
+}
+}
+
+
 #endif // HTTP_RESPONSE_H
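
Taken together, the request-side setters and the response class can be driven roughly as follows. This is only a sketch: it assumes the Status future yields the http_response* that connect() returns (the future's template argument is not visible in this copy of the patch), and it touches only members that actually appear in the diff (Starttransfer(), Status, getProgress()).

    // Sketch: starting the transfer and reading the result, based on the
    // Starttransfer()/Status documentation in http_request.h. That Status.get()
    // returns an http_response* is an assumption inferred from connect()'s return type.
    #include <iostream>

    #include "http_request.h"

    using namespace DynamX::HttpClient;

    void run_transfer(http_request &req)
    {
        if (!req.Starttransfer("http://example.com/"))
            return;

        http_response *res = req.Status.get();   // wait for connect() to finish
        if (res)
            std::cout << "bytes transferred: " << res->getProgress() << std::endl;
    }
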