Move some common slots to the base class (so they can be shared)
This commit is contained in:
parent
8d012115f0
commit
d91a45c328
2 changed files with 169 additions and 0 deletions
|
@ -18,6 +18,13 @@
|
|||
*/
|
||||
|
||||
#include "servicebase.h"
|
||||
#include "libdanbooru_debug.h"
|
||||
#include "utils.h"
|
||||
|
||||
#include <KIO/Job>
|
||||
#include <KIO/Scheduler>
|
||||
|
||||
using KIO::StoredTransferJob;
|
||||
|
||||
|
||||
namespace Danbooru {
|
||||
|
@ -195,5 +202,161 @@ void DanbooruServiceBase::setUserName(const QString &username)
|
|||
m_username = username;
|
||||
}
|
||||
|
||||
void DanbooruServiceBase::processPixmap(KJob *job)
{
    // Slot invoked when a thumbnail download job (started by
    // processPostList) finishes: decode the image bytes, attach the
    // pixmap to the originating post, cache it, and emit
    // postDownloaded / postDownloadFinished as appropriate.

    if (job->error())
    {
        Q_EMIT(downloadError(job->errorString()));
        return;
    }

    StoredTransferJob *jobResult = qobject_cast<StoredTransferJob *>(job);

    if (jobResult == nullptr) {
        Q_EMIT(downloadError(QString("Internal error")));
        return;
    }

    const QByteArray data = jobResult->data();
    Danbooru::DanbooruPost *post = job->property("post").value<DanbooruPost *>();

    if (post == nullptr) {
        // The "post" property is attached by processPostList before the
        // job starts; a missing value means the job was set up wrongly.
        Q_EMIT(downloadError(QString("Internal error")));
        return;
    }

    // BUG FIX: the original read job->property("pixmap"), a property that
    // is never set (the producer used a different key), so the variant was
    // always invalid and yielded a default QPixmap anyway. Use a plain
    // local pixmap and decode the bytes we already fetched into `data`.
    QPixmap pix;

    if (!pix.loadFromData(data))
    {
        Q_EMIT(downloadError(QString("Pixmap data could not be loaded")));
        return;
    }

    post->setPixmap(pix);

    if (m_cache)
    {
        // Cache thumbnails keyed by URL so later fetches skip the network.
        //qCDebug(LIBDANBOORU) << "Inserting item in cache";
        m_cache->insertPixmap(post->thumbnailUrl().url(), pix);
    }

    m_postsToFetch--; // One less post to do

    qCDebug(LIBDANBOORU) << "Current posts remaining: " << m_postsToFetch;
    Q_EMIT(postDownloaded(post));

    if (m_postsToFetch == 0)
    {
        qCDebug(LIBDANBOORU) << "Post download finished";
        Q_EMIT(postDownloadFinished());
    }

}
|
||||
|
||||
void DanbooruServiceBase::processPostList(KJob *job)
{
    // Slot invoked when the post-list (or pool) metadata download
    // finishes: parse the payload into posts, drop blacklisted ones,
    // then fetch each thumbnail from the cache or the network.

    qCDebug(LIBDANBOORU) << "Got post data OK";

    if (job->error()) {
        Q_EMIT(downloadError(job->errorString()));
        // BUG FIX: the original fell through here and went on to parse
        // the (invalid) job data; bail out like processPixmap does.
        return;
    }

    StoredTransferJob *jobResult = qobject_cast<StoredTransferJob *>(job);

    if (jobResult == nullptr) {
        Q_EMIT(downloadError(QString("Internal error")));
        return;
    }

    const QByteArray data = jobResult->data();

    bool ok = false;
    const bool is_pool = job->property("is_pool").toBool();

    QList<QVariantMap> postList;

    if (is_pool) {
        // Special cases for pools: the payload is a single map whose
        // "raw_post_data" entry holds the individual posts.
        const auto poolResult = parseResult(data, apiType(), Danbooru::Pool, &ok);

        // BUG FIX: the original called .at(0) unconditionally, which
        // asserts/crashes when the parser returns an empty list.
        if (!ok || poolResult.isEmpty()) {
            Q_EMIT(downloadError(QString("Unable to decode data")));
            return;
        }

        const auto postData = poolResult.at(0).value("raw_post_data").toList();
        for (const auto &post : postData) {
            postList.append(extractPostData(post, apiType()));
        }

    } else {
        postList = parseResult(data, apiType(), Danbooru::Post, &ok);
    }

    if (!ok) {
        Q_EMIT(downloadError(QString("Unable to decode data")));
        return;
    }

    // How many posts do we have to fetch?

    if (postList.isEmpty()) {
        qCDebug(LIBDANBOORU) << "No posts found";
        Q_EMIT(postDownloadFinished());
        return;
    }

    m_postsToFetch = postList.length();
    qCDebug(LIBDANBOORU) << "Found " << m_postsToFetch << "posts to fetch" << "with limit" << m_maxPosts;

    // This is mostly needed for pools
    if (postList.length() > m_maxPosts) {
        m_postsToFetch = m_maxPosts;
        postList = postList.mid(0, m_maxPosts);
    }

    // Take each QVariantMap by const reference: the original copied the
    // map on every iteration.
    for (const QVariantMap &element : qAsConst(postList)) {

        DanbooruPost *post = new DanbooruPost(element);

        // Remove unwanted posts

        if (isPostBlacklisted(post, m_blacklist, m_maxRating)) {
            m_postsToFetch--;
            delete post;

            // BUG FIX: if the last remaining posts are all blacklisted,
            // the original never emitted postDownloadFinished(). The
            // counter can only hit zero here on the final iteration, so
            // no double emission is possible.
            if (m_postsToFetch == 0) {
                qCDebug(LIBDANBOORU) << "Post download finished";
                Q_EMIT(postDownloadFinished());
            }
            continue;
        }

        QPixmap pix;

        qCDebug(LIBDANBOORU) << "About to download images";

        if (m_cache && m_cache->findPixmap(post->thumbnailUrl().url(), &pix)) {

            // Cache hit: no network job needed for this post.
            post->setPixmap(pix);
            Q_EMIT(postDownloaded(post));
            m_postsToFetch--;

            if (m_postsToFetch == 0) {
                qCDebug(LIBDANBOORU) << "Post download finished";
                Q_EMIT(postDownloadFinished());
            }

        } else {

            qCDebug(LIBDANBOORU) << "Downloading image" << post->thumbnailUrl();
            StoredTransferJob *pixmapJob = KIO::storedGet(post->thumbnailUrl(),
                                           KIO::NoReload, KIO::HideProgressInfo);

            // We don't want to overload the servers, so set some rational
            // priority

            KIO::Scheduler::setJobPriority(static_cast<KIO::SimpleJob *>(pixmapJob), 1);

            // Hand the post over to processPixmap via a dynamic property.
            QVariant variant;
            variant.setValue(post);
            pixmapJob->setProperty("post", variant);

            connect(pixmapJob, &StoredTransferJob::result, this, &DanbooruServiceBase::processPixmap);

        }

    }

}
|
||||
|
||||
} // namespace Danbooru
|
||||
|
||||
|
|
|
@ -38,6 +38,8 @@
|
|||
|
||||
#include <KImageCache>
|
||||
|
||||
class KJob;
|
||||
|
||||
|
||||
namespace Danbooru
|
||||
{
|
||||
|
@ -351,6 +353,10 @@ Q_SIGNALS:
|
|||
* downloaded.
|
||||
**/
|
||||
void tagDownloaded(Danbooru::DanbooruTag *tag);
|
||||
|
||||
public Q_SLOTS:
|
||||
void processPixmap(KJob* job);
|
||||
void processPostList(KJob* job);
|
||||
|
||||
};
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue