Add a header for the backend interface functions
Test / build (push) Successful in 43s

This commit is contained in:
2026-04-28 12:40:36 +02:00
parent 1bc1f4e4d2
commit 12dc26051c
11 changed files with 894 additions and 1001 deletions
+6 -6
View File
@@ -8,10 +8,10 @@
#include "dl.h" #include "dl.h"
#include "utils.h" #include "utils.h"
namespace slack { namespace katja
{
/** /**
* slack::Dl::collect_cache_info: * katja::Dl::collect_cache_info:
* @tmpl: temporary directory for downloading the files. * @tmpl: temporary directory for downloading the files.
* *
* Download files needed to get the information like the list of packages * Download files needed to get the information like the list of packages
@@ -59,7 +59,7 @@ Dl::collect_cache_info (const char *tmpl) noexcept
} }
/** /**
* slack::Dl::generate_cache: * katja::Dl::generate_cache:
* @job_data: A #JobData. * @job_data: A #JobData.
* @tmpl: temporary directory for downloading the files. * @tmpl: temporary directory for downloading the files.
* *
@@ -253,7 +253,7 @@ Dl::~Dl () noexcept
} }
/** /**
* slack::Dl::Dl: * katja::Dl::Dl:
* @name: Repository name. * @name: Repository name.
* @mirror: Repository mirror. * @mirror: Repository mirror.
* @order: Repository order. * @order: Repository order.
@@ -262,7 +262,7 @@ Dl::~Dl () noexcept
* *
* Constructor. * Constructor.
* *
* Return value: New #slack::Dl. * Return value: New #katja::Dl.
**/ **/
Dl::Dl (const char *name, const char *mirror, Dl::Dl (const char *name, const char *mirror,
std::uint8_t order, const char *blacklist, char *index_file) noexcept std::uint8_t order, const char *blacklist, char *index_file) noexcept
+2 -2
View File
@@ -9,8 +9,8 @@
#include "pkgtools.h" #include "pkgtools.h"
#include "utils.h" #include "utils.h"
namespace slack { namespace katja
{
class Dl final : public Pkgtools class Dl final : public Pkgtools
{ {
public: public:
+841 -23
View File
@@ -3,36 +3,184 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. * file, You can obtain one at https://mozilla.org/MPL/2.0/.
*/ */
#include "job.h" #include <dirent.h>
#include <glib/gstdio.h>
#include <string> #include <cstdint>
#include "utils.h" #include <stdlib.h>
#include <stdio.h>
#include <zlib.h>
#include <curl/curl.h>
#include <pk-backend.h> #include <pk-backend.h>
#include <pk-backend-job.h> #include <sqlite3.h>
#include "job.h"
#include "dl.h"
#include "pkgtools.h"
#include "slackpkg.h"
#include "utils.h"
namespace slack { static GSList *repos = nullptr;
/** void pk_backend_initialize(GKeyFile *conf)
* Returns true if the package isn't filtered out by the filters, false
* otherwise.
*/
bool
filter_package (PkBitfield filters, bool is_installed)
{ {
if ((is_installed && !pk_bitfield_contain (filters, PK_FILTER_ENUM_NOT_INSTALLED)) char *path, **groups;
|| (!is_installed && !pk_bitfield_contain (filters, PK_FILTER_ENUM_INSTALLED))) int ret;
gushort i;
gsize groups_len;
GFile *conf_file;
GFileInfo *file_info;
GKeyFile *key_conf;
GError *err = nullptr;
void *repo = nullptr;
sqlite3 *db;
sqlite3_stmt *stmt;
g_debug("backend: initialize");
curl_global_init(CURL_GLOBAL_DEFAULT);
/* Open the database. We will need it to save the time the configuration file was last modified. */
path = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "metadata", "metadata.db", nullptr);
if (sqlite3_open(path, &db) != SQLITE_OK)
{ {
return true; g_error("%s: %s", path, sqlite3_errmsg(db));
} }
return false; g_free(path);
/* Read the configuration file */
key_conf = g_key_file_new();
path = g_build_filename(SYSCONFDIR, "PackageKit", "Slackware.conf", nullptr);
g_key_file_load_from_file(key_conf, path, G_KEY_FILE_NONE, &err);
if (err)
{
g_error("%s: %s", path, err->message);
g_error_free(err);
}
conf_file = g_file_new_for_path(path);
if (!(file_info = g_file_query_info(conf_file,
"time::modified-usec",
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
nullptr,
&err)))
{
g_error("%s", err->message);
g_error_free(err);
}
if ((ret = sqlite3_prepare_v2(db,
"UPDATE cache_info SET value = ? WHERE key LIKE 'last_modification'",
-1,
&stmt,
nullptr)) == SQLITE_OK) {
ret = sqlite3_bind_int(stmt, 1, g_file_info_get_attribute_uint32(file_info, "time::modified-usec"));
if (ret == SQLITE_OK)
{
ret = sqlite3_step(stmt);
}
sqlite3_finalize(stmt);
}
if ((ret != SQLITE_OK) && (ret != SQLITE_DONE))
{
g_error("%s: %s", path, sqlite3_errstr(ret));
}
else if (!sqlite3_changes(db))
{
g_error("Failed to update database: %s", path);
}
g_object_unref(file_info);
g_object_unref(conf_file);
sqlite3_close_v2(db);
g_free(path);
/* Initialize an object for each well-formed repository */
groups = g_key_file_get_groups(key_conf, &groups_len);
for (i = 0; i < groups_len; i++)
{
char *blacklist = g_key_file_get_string(key_conf, groups[i], "Blacklist", nullptr);
char *mirror = g_key_file_get_string(key_conf, groups[i], "Mirror", nullptr);
if (g_key_file_has_key(key_conf, groups[i], "Priority", nullptr))
{
repo = new Slackpkg (groups[i], mirror, i + 1, blacklist,
g_key_file_get_string_list(key_conf, groups[i], "Priority", nullptr, nullptr));
}
else if (g_key_file_has_key(key_conf, groups[i], "IndexFile", nullptr))
{
repo = new Dl (groups[i], mirror, i + 1, blacklist,
g_key_file_get_string(key_conf, groups[i], "IndexFile", nullptr));
}
if (repo)
{
repos = g_slist_append(repos, repo);
}
else
{
g_free(groups[i]);
}
g_free(mirror);
g_free(blacklist);
}
g_free(groups);
g_key_file_free(key_conf);
} }
void pk_backend_destroy()
{
g_debug("backend: destroy");
for (GSList *l = repos; l; l = g_slist_next (l))
{
delete static_cast<Pkgtools *> (l->data);
}
g_slist_free (repos);
curl_global_cleanup ();
} }
void void pk_backend_start_job(PkBackendJob *job)
{
char *db_filename = nullptr;
JobData *job_data = g_new0(JobData, 1);
db_filename = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "metadata", "metadata.db", nullptr);
if (sqlite3_open(db_filename, &job_data->db) == SQLITE_OK) { /* Some SQLite settings */
sqlite3_exec(job_data->db, "PRAGMA foreign_keys = ON", nullptr, nullptr, nullptr);
}
else
{
pk_backend_job_error_code(job, PK_ERROR_ENUM_NO_CACHE,
"%s: %s",
db_filename,
sqlite3_errmsg(job_data->db));
goto out;
}
pk_backend_job_set_user_data(job, job_data);
out:
g_free(db_filename);
}
/**
 * pk_backend_stop_job:
 * @job: the finished job.
 *
 * Per-job teardown: releases the curl handle and the SQLite connection
 * owned by the job's #JobData and detaches it from the job.
 */
void pk_backend_stop_job(PkBackendJob *job)
{
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    /* pk_backend_start_job() leaves the user data unset when the metadata
     * database could not be opened, so job_data may legitimately be null. */
    if (!job_data)
    {
        return;
    }
    if (job_data->curl)
    {
        curl_easy_cleanup(job_data->curl);
    }
    sqlite3_close(job_data->db);
    g_free(job_data);
    pk_backend_job_set_user_data(job, nullptr);
}
static void
pk_backend_search_thread (PkBackendJob *job, GVariant *params, void *user_data) pk_backend_search_thread (PkBackendJob *job, GVariant *params, void *user_data)
{ {
auto job_data = reinterpret_cast<slack::JobData *> (pk_backend_job_get_user_data (job)); auto job_data = reinterpret_cast<katja::JobData *> (pk_backend_job_get_user_data (job));
char **vals; char **vals;
PkBitfield filters; PkBitfield filters;
@@ -51,17 +199,17 @@ pk_backend_search_thread (PkBackendJob *job, GVariant *params, void *user_data)
/* Now we're ready to output all packages */ /* Now we're ready to output all packages */
while (sqlite3_step (stmt) == SQLITE_ROW) while (sqlite3_step (stmt) == SQLITE_ROW)
{ {
slack::Info info = slack::is_installed ( katja::Info info = katja::is_installed (
reinterpret_cast<const char *> (sqlite3_column_text (stmt, 2))); reinterpret_cast<const char *> (sqlite3_column_text (stmt, 2)));
if ((info == slack::Info::installed || info == slack::Info::updating) if ((info == katja::Info::installed || info == katja::Info::updating)
&& slack::filter_package (filters, true)) && !pk_bitfield_contain (filters, PK_FILTER_ENUM_NOT_INSTALLED))
{ {
pk_backend_job_package (job, slack::Info::installed, pk_backend_job_package (job, katja::Info::installed,
reinterpret_cast<const char *> (sqlite3_column_text (stmt, 0)), reinterpret_cast<const char *> (sqlite3_column_text (stmt, 0)),
reinterpret_cast<const char *> (sqlite3_column_text (stmt, 1))); reinterpret_cast<const char *> (sqlite3_column_text (stmt, 1)));
} }
else if (info == slack::Info::installing && slack::filter_package (filters, false)) else if (info == katja::Info::installing && !pk_bitfield_contain (filters, PK_FILTER_ENUM_INSTALLED))
{ {
pk_backend_job_package(job, PK_INFO_ENUM_AVAILABLE, pk_backend_job_package(job, PK_INFO_ENUM_AVAILABLE,
reinterpret_cast<const char *> (sqlite3_column_text (stmt, 0)), reinterpret_cast<const char *> (sqlite3_column_text (stmt, 0)),
@@ -79,3 +227,673 @@ pk_backend_search_thread (PkBackendJob *job, GVariant *params, void *user_data)
sqlite3_free (query); sqlite3_free (query);
g_free (search); g_free (search);
} }
/**
 * pk_backend_search_names:
 *
 * Searches package names; the heavy lifting happens in
 * pk_backend_search_thread(), which matches against the "name" column.
 */
void pk_backend_search_names(PkBackendJob *job, char **values)
{
    pk_backend_job_thread_create(job, pk_backend_search_thread,
                                 const_cast<char *> ("name"), nullptr);
}
/**
 * pk_backend_search_details:
 *
 * Searches package descriptions; delegates to pk_backend_search_thread(),
 * which matches against the "desc" column.
 */
void pk_backend_search_details(PkBackendJob *job, char **values)
{
    pk_backend_job_thread_create(job, pk_backend_search_thread,
                                 const_cast<char *> ("desc"), nullptr);
}
/**
 * pk_backend_search_groups:
 *
 * Searches package groups; delegates to pk_backend_search_thread(),
 * which matches against the "cat" (category) column.
 */
void pk_backend_search_groups(PkBackendJob *job, char **values)
{
    pk_backend_job_thread_create(job, pk_backend_search_thread,
                                 const_cast<char *> ("cat"), nullptr);
}
/**
 * pk_backend_search_files:
 * @job: the job.
 * @values: filename patterns, joined with '%' into one LIKE pattern.
 *
 * Looks the patterns up in the filelist table and emits every matching
 * package, tagged installed or available according to is_installed().
 */
void pk_backend_search_files(PkBackendJob *job, char **values)
{
    char *search;
    char *query;
    sqlite3_stmt *stmt;
    Info ret;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    pk_backend_job_set_percentage(job, 0);
    search = g_strjoinv("%", values);
    query = sqlite3_mprintf("SELECT (p.name || ';' || p.ver || ';' || p.arch || ';' || r.repo), p.summary, "
                            "p.full_name FROM filelist AS f NATURAL JOIN pkglist AS p NATURAL JOIN repos AS r "
                            "WHERE f.filename LIKE '%%%q%%' GROUP BY f.full_name", search);
    if ((sqlite3_prepare_v2(job_data->db, query, -1, &stmt, nullptr) == SQLITE_OK))
    {
        /* Now we're ready to output all packages */
        while (sqlite3_step(stmt) == SQLITE_ROW)
        {
            ret = is_installed((char*) sqlite3_column_text(stmt, 2));
            if ((ret == Info::installed) || (ret == Info::updating))
            {
                pk_backend_job_package(job, PK_INFO_ENUM_INSTALLED,
                                       (char*) sqlite3_column_text(stmt, 0),
                                       (char*) sqlite3_column_text(stmt, 1));
            }
            /* Fixed: ret is a katja::Info, so compare against Info::installing —
             * the previous PK_INFO_ENUM_INSTALLING mixed two unrelated enums. */
            else if (ret == Info::installing)
            {
                pk_backend_job_package(job, PK_INFO_ENUM_AVAILABLE,
                                       (char*) sqlite3_column_text(stmt, 0),
                                       (char*) sqlite3_column_text(stmt, 1));
            }
        }
        sqlite3_finalize(stmt);
    }
    else
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
    }
    sqlite3_free(query);
    g_free(search);
    pk_backend_job_set_percentage(job, 100);
}
/**
 * pk_backend_get_details:
 * @job: the job.
 * @package_ids: package ids; only the first one is queried.
 *
 * Emits the description, group and uncompressed size for a package.
 * A homepage URL is heuristically extracted from the description text,
 * and the sentence carrying it is trimmed off.
 */
void pk_backend_get_details(PkBackendJob *job, char **package_ids)
{
    char *homepage = nullptr;
    char** tokens;
    gsize i;
    GString *desc;
    GRegex *expr;
    GMatchInfo *match_info;
    GError *err = nullptr;
    sqlite3_stmt *stmt;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT p.desc, p.cat, p.uncompressed FROM pkglist AS p NATURAL JOIN repos AS r "
                            "WHERE name LIKE @name AND r.repo LIKE @repo AND ext NOT LIKE 'obsolete'",
                            -1,
                            &stmt,
                            nullptr) != SQLITE_OK)) {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    tokens = pk_package_id_split(package_ids[0]);
    sqlite3_bind_text(stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
    sqlite3_bind_text(stmt, 2, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
    g_strfreev(tokens);
    if (sqlite3_step(stmt) != SQLITE_ROW)
        goto out;
    desc = g_string_new((char *) sqlite3_column_text(stmt, 0));
    /* Regular expression for searching a homepage */
    expr = g_regex_new("(?:http|ftp):\\/\\/[[:word:]\\/\\-\\.]+[[:word:]\\/](?=\\.?$)",
                       (GRegexCompileFlags)(G_REGEX_OPTIMIZE | G_REGEX_DUPNAMES),
                       (GRegexMatchFlags)(0),
                       &err);
    if (err)
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_UNKNOWN, "%s", err->message);
        g_error_free(err);
        g_string_free(desc, true); /* fixed: desc leaked on this error path */
        goto out;
    }
    if (g_regex_match(expr, desc->str, (GRegexMatchFlags)0, &match_info))
    {
        homepage = g_match_info_fetch(match_info, 0); /* URL */
        /* Remove the last sentence with the copied URL */
        for (i = desc->len - 1; i > 0; i--)
        {
            if ((desc->str[i - 1] == '.') && (desc->str[i] == ' '))
            {
                g_string_truncate(desc, i);
                break;
            }
        }
    }
    /* Fixed: GLib allocates match_info even when no match was found,
     * so it must be freed unconditionally. */
    g_match_info_free(match_info);
    g_regex_unref(expr);
    /* Ready */
    pk_backend_job_details(job,
                           package_ids[0],
                           nullptr,
                           nullptr,
                           pk_group_enum_from_string((char *) sqlite3_column_text(stmt, 1)),
                           desc->str,
                           homepage,
                           sqlite3_column_int(stmt, 2),
                           G_MAXUINT64);
    g_free(homepage);
    if (desc)
    {
        g_string_free(desc, true);
    }
out:
    sqlite3_finalize(stmt);
}
/**
 * pk_backend_resolve:
 * @job: the job.
 * @packages: package name patterns to resolve.
 *
 * Resolves each name against the cache, preferring the repository with
 * the lowest order when several carry the same package, and emits the
 * result tagged installed or available.
 */
void pk_backend_resolve(PkBackendJob *job, char **packages)
{
    char **val;
    sqlite3_stmt *stmt;
    Info ret;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    pk_backend_job_set_percentage(job, 0);
    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT (p1.name || ';' || p1.ver || ';' || p1.arch || ';' || r.repo), p1.summary, "
                            "p1.full_name FROM pkglist AS p1 NATURAL JOIN repos AS r "
                            "WHERE p1.name LIKE @search AND p1.repo_order = "
                            "(SELECT MIN(p2.repo_order) FROM pkglist AS p2 WHERE p2.name = p1.name GROUP BY p2.name)",
                            -1,
                            &stmt,
                            nullptr) == SQLITE_OK)) {
        /* Output packages matching each pattern */
        for (val = packages; *val; val++)
        {
            sqlite3_bind_text(stmt, 1, *val, -1, SQLITE_TRANSIENT);
            while (sqlite3_step(stmt) == SQLITE_ROW)
            {
                /* Fixed: ret was read while uninitialized — the
                 * is_installed() call present in the other query
                 * functions was missing here. Column 2 is full_name. */
                ret = is_installed((char*) sqlite3_column_text(stmt, 2));
                if ((ret == Info::installed) || (ret == Info::updating))
                {
                    pk_backend_job_package(job, PK_INFO_ENUM_INSTALLED,
                                           (char*) sqlite3_column_text(stmt, 0),
                                           (char*) sqlite3_column_text(stmt, 1));
                }
                else if (ret == Info::installing)
                {
                    pk_backend_job_package(job, PK_INFO_ENUM_AVAILABLE,
                                           (char*) sqlite3_column_text(stmt, 0),
                                           (char*) sqlite3_column_text(stmt, 1));
                }
            }
            sqlite3_clear_bindings(stmt);
            sqlite3_reset(stmt);
        }
        sqlite3_finalize(stmt);
    } else {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
    }
    pk_backend_job_set_percentage(job, 100);
}
/**
 * pk_backend_download_packages:
 * @job: the job.
 * @package_ids: ids of the packages to download.
 * @directory: destination directory for the downloaded files.
 *
 * Looks each package id up in the cache, downloads it through the
 * repository object it belongs to, and reports the resulting file path.
 */
void pk_backend_download_packages(PkBackendJob *job, char **package_ids, const char *directory)
{
    char *path, *to_strv[] = {nullptr, nullptr};
    unsigned i;
    sqlite3_stmt *stmt;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    /* Column 0 is the summary, column 1 the on-disk file name. */
    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT summary, (full_name || '.' || ext) FROM pkglist NATURAL JOIN repos "
                            "WHERE name LIKE @name AND ver LIKE @ver AND arch LIKE @arch AND repo LIKE @repo",
                            -1,
                            &stmt,
                            nullptr) != SQLITE_OK))
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    for (i = 0; package_ids[i]; ++i)
    {
        char **tokens = pk_package_id_split(package_ids[i]);
        sqlite3_bind_text(stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(stmt, 2, tokens[PK_PACKAGE_ID_VERSION], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(stmt, 3, tokens[PK_PACKAGE_ID_ARCH], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(stmt, 4, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
        if (sqlite3_step(stmt) == SQLITE_ROW)
        {
            GSList *repo;
            /* The PK_PACKAGE_ID_DATA token names the repository; only
             * download from repositories we actually configured. */
            if ((repo = g_slist_find_custom(repos, tokens[PK_PACKAGE_ID_DATA], cmp_repo)))
            {
                pk_backend_job_package(job, PK_INFO_ENUM_DOWNLOADING,
                                       package_ids[i],
                                       (char *) sqlite3_column_text(stmt, 0));
                static_cast<Pkgtools *> (repo->data)->download (job,
                                                                directory, tokens[PK_PACKAGE_ID_NAME]);
                path = g_build_filename(directory, (char *) sqlite3_column_text(stmt, 1), nullptr);
                to_strv[0] = path;
                pk_backend_job_files(job, nullptr, to_strv);
                g_free(path);
            }
        }
        /* Reuse the prepared statement for the next package id. */
        sqlite3_clear_bindings(stmt);
        sqlite3_reset(stmt);
        g_strfreev(tokens);
    }
out:
    sqlite3_finalize(stmt);
}
/**
 * pk_backend_install_packages:
 * @job: the job.
 * @package_ids: ids of the packages (or collections) to install.
 *
 * Expands collections into their member packages, downloads everything
 * into the PackageKit cache, then installs each package through its
 * repository object. Progress is split half/half between the download
 * and the install phase.
 */
void pk_backend_install_packages(PkBackendJob *job, char **package_ids)
{
    char *dest_dir_name;
    unsigned i;
    gdouble percent_step;
    GSList *install_list = nullptr, *l;
    sqlite3_stmt *pkglist_stmt = nullptr, *collection_stmt = nullptr;
    /* Fixed: was PkInfoEnum, but is_installed() returns a katja::Info and
     * the comparisons below use Info:: values. */
    Info ret;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT summary, cat FROM pkglist NATURAL JOIN repos "
                            "WHERE name LIKE @name AND ver LIKE @ver AND arch LIKE @arch AND repo LIKE @repo",
                            -1,
                            &pkglist_stmt,
                            nullptr) != SQLITE_OK) ||
        (sqlite3_prepare_v2(job_data->db,
                            "SELECT (c.collection_pkg || ';' || p.ver || ';' || p.arch || ';' || r.repo), p.summary, "
                            "p.full_name, p.ext FROM collections AS c "
                            "JOIN pkglist AS p ON c.collection_pkg = p.name "
                            "JOIN repos AS r ON p.repo_order = r.repo_order "
                            "WHERE c.name LIKE @name AND r.repo LIKE @repo",
                            -1,
                            &collection_stmt,
                            nullptr) != SQLITE_OK))
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    /* Build the flat list of package ids to install. */
    for (i = 0; package_ids[i]; i++)
    {
        char **tokens = pk_package_id_split(package_ids[i]);
        sqlite3_bind_text(pkglist_stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(pkglist_stmt, 2, tokens[PK_PACKAGE_ID_VERSION], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(pkglist_stmt, 3, tokens[PK_PACKAGE_ID_ARCH], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(pkglist_stmt, 4, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
        if (sqlite3_step(pkglist_stmt) == SQLITE_ROW)
        {
            /* If it isn't a collection */
            if (g_strcmp0((char *) sqlite3_column_text(pkglist_stmt, 1), "collections"))
            {
                install_list = g_slist_append(install_list, g_strdup(package_ids[i]));
            }
            else
            {
                /* Collections expand to every member that still needs
                 * installing or updating. */
                sqlite3_bind_text(collection_stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
                sqlite3_bind_text(collection_stmt, 2, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
                while (sqlite3_step(collection_stmt) == SQLITE_ROW)
                {
                    ret = is_installed((char*) sqlite3_column_text(collection_stmt, 2));
                    if ((ret == Info::installing) || (ret == Info::updating))
                    {
                        install_list = g_slist_append(install_list,
                                                      g_strdup((char *) sqlite3_column_text(collection_stmt, 0)));
                    }
                }
                sqlite3_clear_bindings(collection_stmt);
                sqlite3_reset(collection_stmt);
            }
        }
        sqlite3_clear_bindings(pkglist_stmt);
        sqlite3_reset(pkglist_stmt);
        g_strfreev(tokens);
    }
    if (install_list)
    {
        /* / 2 means total percentage for installing and for downloading */
        percent_step = 100.0 / g_slist_length(install_list) / 2;
        /* Download the packages */
        dest_dir_name = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "downloads", nullptr);
        for (l = install_list, i = 0; l; l = g_slist_next(l), i++)
        {
            char **tokens;
            GSList *repo;

            pk_backend_job_set_percentage(job, percent_step * i);
            tokens = pk_package_id_split((char *)(l->data));
            repo = g_slist_find_custom(repos, tokens[PK_PACKAGE_ID_DATA], cmp_repo);
            if (repo)
            {
                static_cast<Pkgtools *> (repo->data)->download (job,
                                                                dest_dir_name, tokens[PK_PACKAGE_ID_NAME]);
            }
            g_strfreev(tokens);
        }
        g_free(dest_dir_name);
        /* Install the packages; i keeps counting so the percentage
         * continues from 50% up to 100%. */
        for (l = install_list; l; l = g_slist_next(l), i++)
        {
            char **tokens;
            GSList *repo;

            pk_backend_job_set_percentage(job, percent_step * i);
            tokens = pk_package_id_split((char *)(l->data));
            repo = g_slist_find_custom(repos, tokens[PK_PACKAGE_ID_DATA], cmp_repo);
            if (repo)
            {
                static_cast<Pkgtools *> (repo->data)->install (job, tokens[PK_PACKAGE_ID_NAME]);
            }
            g_strfreev(tokens);
        }
    }
    g_slist_free_full(install_list, g_free);
out:
    sqlite3_finalize(pkglist_stmt);
    sqlite3_finalize(collection_stmt);
}
/**
 * pk_backend_remove_packages:
 * @job: the job.
 * @package_ids: ids of the packages to remove.
 *
 * Removes each package by shelling out to /sbin/removepkg, reporting
 * progress proportionally and aborting on the first spawn failure.
 */
void pk_backend_remove_packages(PkBackendJob *job, char **package_ids)
{
    char *cmd_line;
    unsigned i;
    gdouble percent_step;
    GError *err = nullptr;

    /* Add percent_step percents per removed package */
    percent_step = 100.0 / g_strv_length(package_ids);
    for (i = 0; package_ids[i]; i++)
    {
        char **tokens;

        pk_backend_job_set_percentage(job, percent_step * i);
        tokens = pk_package_id_split(package_ids[i]);
        cmd_line = g_strconcat("/sbin/removepkg ", tokens[PK_PACKAGE_ID_NAME], nullptr);
        /* Pkgtools return always 0 */
        g_spawn_command_line_sync(cmd_line, nullptr, nullptr, nullptr, &err);
        g_free(cmd_line);
        g_strfreev(tokens);
        if (err)
        {
            pk_backend_job_error_code(job, PK_ERROR_ENUM_PACKAGE_FAILED_TO_REMOVE, "%s", err->message);
            g_error_free(err);
            return;
        }
    }
    /* Fixed: the 100% report was inside the loop, making the progress
     * jump to 100 right after the first package. */
    pk_backend_job_set_percentage(job, 100);
}
/**
 * pk_backend_get_updates:
 * @job: the job.
 *
 * Walks /var/log/packages (one metadata file per installed package) and
 * compares every installed package with the cache: obsolete packages are
 * reported for removal, packages whose cached full name differs from the
 * installed one are reported as updates.
 */
void pk_backend_get_updates(PkBackendJob *job)
{
    char *pkg_id, *full_name, *desc;
    const char *pkg_metadata_filename;
    GFile *pkg_metadata_dir;
    GFileEnumerator *pkg_metadata_enumerator;
    GFileInfo *pkg_metadata_file_info;
    GError *err = nullptr;
    sqlite3_stmt *stmt;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT p1.full_name, p1.name, p1.ver, p1.arch, r.repo, p1.summary, p1.ext "
                            "FROM pkglist AS p1 NATURAL JOIN repos AS r "
                            "WHERE p1.name LIKE @name AND p1.repo_order = "
                            "(SELECT MIN(p2.repo_order) FROM pkglist AS p2 WHERE p2.name = p1.name GROUP BY p2.name)",
                            -1,
                            &stmt,
                            nullptr) != SQLITE_OK))
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    /* Read the package metadata directory and comprare all installed packages with ones in the cache */
    pkg_metadata_dir = g_file_new_for_path("/var/log/packages");
    pkg_metadata_enumerator = g_file_enumerate_children(pkg_metadata_dir, "standard::name",
                                                        G_FILE_QUERY_INFO_NONE,
                                                        nullptr,
                                                        &err);
    g_object_unref(pkg_metadata_dir);
    if (err)
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_NO_CACHE, "/var/log/packages: %s", err->message);
        g_error_free(err);
        goto out;
    }
    while ((pkg_metadata_file_info = g_file_enumerator_next_file(pkg_metadata_enumerator, nullptr, nullptr)))
    {
        char **tokens;

        pkg_metadata_filename = g_file_info_get_name(pkg_metadata_file_info);
        tokens = split_package_name(pkg_metadata_filename);
        /* Select the package from the database */
        sqlite3_bind_text(stmt, 1, tokens[0], -1, SQLITE_TRANSIENT);
        /* If there are more packages with the same name, remember the one from the
         * repository with the lowest order.
         * Fixed: this was ||, which read result columns even when no row was
         * returned; both conditions must hold before the columns are used. */
        if ((sqlite3_step(stmt) == SQLITE_ROW)
            && g_slist_find_custom(repos, ((char *) sqlite3_column_text(stmt, 4)), cmp_repo))
        {
            full_name = g_strdup((char *) sqlite3_column_text(stmt, 0));
            if (!g_strcmp0((char *) sqlite3_column_text(stmt, 6), "obsolete"))
            { /* Remove if obsolete */
                pkg_id = pk_package_id_build(tokens[PK_PACKAGE_ID_NAME],
                                             tokens[PK_PACKAGE_ID_VERSION],
                                             tokens[PK_PACKAGE_ID_ARCH],
                                             "obsolete");
                /* TODO:
                 * 1: Use the repository name instead of "obsolete" above and check in pk_backend_update_packages()
                 if the package is obsolete or not
                 * 2: Get description from /var/log/packages, not from the database */
                desc = g_strdup((char *) sqlite3_column_text(stmt, 5));
                pk_backend_job_package(job, PK_INFO_ENUM_REMOVING, pkg_id, desc);
                g_free(desc);
                g_free(pkg_id);
            }
            else if (g_strcmp0(pkg_metadata_filename, full_name))
            { /* Update available */
                pkg_id = pk_package_id_build((char *) sqlite3_column_text(stmt, 1),
                                             (char *) sqlite3_column_text(stmt, 2),
                                             (char *) sqlite3_column_text(stmt, 3),
                                             (char *) sqlite3_column_text(stmt, 4));
                desc = g_strdup((char *) sqlite3_column_text(stmt, 5));
                pk_backend_job_package(job, PK_INFO_ENUM_NORMAL, pkg_id, desc);
                g_free(desc);
                g_free(pkg_id);
            }
            g_free(full_name);
        }
        sqlite3_clear_bindings(stmt);
        sqlite3_reset(stmt);
        g_strfreev(tokens);
        g_object_unref(pkg_metadata_file_info);
    }
    g_object_unref(pkg_metadata_enumerator);
out:
    sqlite3_finalize(stmt);
}
/**
 * pk_backend_update_packages:
 * @job: the job.
 * @package_ids: ids of the packages to update.
 *
 * Two-phase update: first every non-obsolete package is downloaded into
 * the PackageKit cache, then each one is installed through its repository
 * object. Ids whose repository field is "obsolete" are removed with
 * /sbin/removepkg instead.
 */
void pk_backend_update_packages(PkBackendJob *job, char **package_ids)
{
    /* Phase 1: download everything that is not marked obsolete. */
    char *cache_dir = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "downloads", nullptr);
    for (char **id = package_ids; *id; ++id)
    {
        char **parts = pk_package_id_split(*id);
        if (g_strcmp0(parts[PK_PACKAGE_ID_DATA], "obsolete"))
        {
            GSList *origin = g_slist_find_custom(repos, parts[PK_PACKAGE_ID_DATA], cmp_repo);
            if (origin)
            {
                static_cast<Pkgtools *> (origin->data)->download (job,
                                                                  cache_dir, parts[PK_PACKAGE_ID_NAME]);
            }
        }
        g_strfreev(parts);
    }
    g_free(cache_dir);

    /* Phase 2: install the downloaded packages. */
    for (char **id = package_ids; *id; ++id)
    {
        char **parts = pk_package_id_split(*id);
        if (g_strcmp0(parts[PK_PACKAGE_ID_DATA], "obsolete"))
        {
            GSList *origin = g_slist_find_custom(repos, parts[PK_PACKAGE_ID_DATA], cmp_repo);
            if (origin)
            {
                static_cast<Pkgtools *> (origin->data)->install (job,
                                                                 parts[PK_PACKAGE_ID_NAME]);
            }
        }
        else
        {
            /* Remove obsolete package
             * TODO: Removing should be an independent operation (not during installing updates) */
            char *cmd_line = g_strconcat("/sbin/removepkg ", parts[PK_PACKAGE_ID_NAME], nullptr);
            g_spawn_command_line_sync(cmd_line, nullptr, nullptr, nullptr, nullptr);
            g_free(cmd_line);
        }
        g_strfreev(parts);
    }
}
/**
 * pk_backend_refresh_cache:
 * @job: the job.
 * @force: when true, wipe and rebuild the whole metadata cache.
 *
 * Refreshes the repository metadata. A full refresh is forced when the
 * configuration file is newer than the timestamp recorded in the
 * cache_info table. Repository index files are downloaded into a
 * temporary directory, which is removed again on every exit path.
 */
void pk_backend_refresh_cache(PkBackendJob *job, bool force)
{
    char *tmp_dir_name, *db_err, *path = nullptr;
    int ret;
    GSList *file_list = nullptr;
    GFile *db_file = nullptr;
    GFileInfo *file_info = nullptr;
    GError *err = nullptr;
    sqlite3_stmt *stmt = nullptr;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    /* Create temporary directory */
    tmp_dir_name = g_dir_make_tmp("PackageKit.XXXXXX", &err);
    if (!tmp_dir_name)
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_INTERNAL_ERROR, "%s", err->message);
        g_error_free(err);
        return;
    }
    /* Force the complete cache refresh if the read configuration file is newer than the metadata cache */
    if (!force)
    {
        path = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "metadata", "metadata.db", nullptr);
        db_file = g_file_new_for_path(path);
        file_info = g_file_query_info(db_file, "time::modified-usec", G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, nullptr, &err);
        if (err)
        {
            pk_backend_job_error_code(job, PK_ERROR_ENUM_NO_CACHE, "%s: %s", path, err->message);
            g_error_free(err);
            goto out;
        }
        /* cache_info.last_modification is written by pk_backend_initialize()
         * with the configuration file's timestamp. */
        ret = sqlite3_prepare_v2(job_data->db,
                                 "SELECT value FROM cache_info WHERE key LIKE 'last_modification'",
                                 -1,
                                 &stmt,
                                 nullptr);
        if ((ret != SQLITE_OK) || ((ret = sqlite3_step(stmt)) != SQLITE_ROW))
        {
            pk_backend_job_error_code(job,
                                      PK_ERROR_ENUM_NO_CACHE,
                                      "%s: %s",
                                      path,
                                      sqlite3_errstr(ret));
            goto out;
        }
        if ((std::uint32_t) sqlite3_column_int(stmt, 0) > g_file_info_get_attribute_uint32(file_info, "time::modified-usec"))
        {
            force = true;
        }
    }
    if (force) /* It should empty all tables */
    {
        /* Deleting from repos cascades to the dependent tables via the
         * foreign keys enabled in pk_backend_start_job(). */
        if (sqlite3_exec(job_data->db, "DELETE FROM repos", nullptr, 0, &db_err) != SQLITE_OK)
        {
            pk_backend_job_error_code(job, PK_ERROR_ENUM_INTERNAL_ERROR, "%s", db_err);
            sqlite3_free(db_err);
            goto out;
        }
    }
    // Get list of files that should be downloaded.
    for (GSList *l = repos; l; l = g_slist_next(l))
    {
        file_list = g_slist_concat(file_list,
                                   static_cast<Pkgtools *> (l->data)->collect_cache_info (tmp_dir_name));
    }
    /* Download repository */
    for (GSList *l = file_list; l; l = g_slist_next(l))
    {
        /* Each entry is a char*[2] of {source URL, destination path}. */
        get_file(&job_data->curl, static_cast<char **> (l->data)[0],
                 static_cast<char **> (l->data)[1]);
    }
    g_slist_free_full(file_list, (GDestroyNotify)g_strfreev);
    /* Refresh cache */
    for (GSList *l = repos; l; l = g_slist_next(l))
    {
        static_cast<Pkgtools *> (l->data)->generate_cache (job, tmp_dir_name);
    }
out:
    sqlite3_finalize(stmt);
    if (file_info)
    {
        g_object_unref(file_info);
    }
    if (db_file)
    {
        g_object_unref(db_file);
    }
    g_free(path);
    /* The temporary download directory is always cleaned up. */
    pk_directory_remove_contents(tmp_dir_name);
    g_rmdir(tmp_dir_name);
    g_free(tmp_dir_name);
}
/**
 * pk_backend_get_update_detail:
 * @job: the job.
 * @package_ids: ids of the updates to describe.
 *
 * No per-update metadata is stored by this backend, so an empty, stable
 * update-detail record is emitted for every requested package id.
 */
void pk_backend_get_update_detail(PkBackendJob *job, char **package_ids)
{
    for (char **id = package_ids; *id != nullptr; ++id)
    {
        pk_backend_job_update_detail (job,
                                      *id,
                                      nullptr,
                                      nullptr,
                                      nullptr,
                                      nullptr,
                                      nullptr,
                                      PK_RESTART_ENUM_NONE,
                                      nullptr,
                                      nullptr,
                                      PK_UPDATE_STATE_ENUM_STABLE,
                                      nullptr,
                                      nullptr);
    }
}
+18 -8
View File
@@ -5,17 +5,27 @@
*/ */
#pragma once #pragma once
#include <pk-backend.h> using namespace katja;
#include <sqlite3.h>
namespace slack { void pk_backend_initialize(GKeyFile *conf);
void pk_backend_destroy();
bool filter_package (PkBitfield filters, bool is_installed); void pk_backend_start_job(PkBackendJob *job);
void pk_backend_stop_job(PkBackendJob *job);
} void pk_backend_search_names(PkBackendJob *job, char **values);
void pk_backend_search_details(PkBackendJob *job, char **values);
void pk_backend_search_groups(PkBackendJob *job, char **values);
void pk_backend_search_files(PkBackendJob *job, char **values);
extern "C" { void pk_backend_get_details(PkBackendJob *job, char **package_ids);
void pk_backend_resolve(PkBackendJob *job, char **packages);
void pk_backend_search_thread (PkBackendJob *job, GVariant *params, void *user_data); void pk_backend_download_packages(PkBackendJob *job, char **package_ids, const char *directory);
void pk_backend_install_packages(PkBackendJob *job, char **package_ids);
void pk_backend_remove_packages(PkBackendJob *job, char **package_ids);
} void pk_backend_get_updates(PkBackendJob *job);
void pk_backend_update_packages(PkBackendJob *job, char **package_ids);
void pk_backend_refresh_cache(PkBackendJob *job, bool force);
void pk_backend_get_update_detail(PkBackendJob *job, char **package_ids);
-931
View File
@@ -1,931 +0,0 @@
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*/
#include <dirent.h>
#include <glib/gstdio.h>
#include <cstdint>
#include <stdlib.h>
#include <stdio.h>
#include <zlib.h>
#include <curl/curl.h>
#include <pk-backend.h>
#include <sqlite3.h>
#include "job.h"
#include "dl.h"
#include "pkgtools.h"
#include "slackpkg.h"
#include "utils.h"
using namespace slack;
static GSList *repos = nullptr;
/**
 * pk_backend_initialize:
 * @conf: backend configuration (unused here; the backend reads its own
 *        Slackware.conf instead).
 *
 * One-time backend setup: initializes curl, records the configuration
 * file's modification time in the metadata database, and builds one
 * repository object (Slackpkg or Dl) per well-formed group in
 * Slackware.conf. Fatal problems are reported via g_error(), which
 * aborts the daemon.
 */
void pk_backend_initialize(GKeyFile *conf)
{
    char *path, **groups;
    int ret;
    gushort i;
    gsize groups_len;
    GFile *conf_file;
    GFileInfo *file_info;
    GKeyFile *key_conf;
    GError *err = nullptr;
    void *repo = nullptr;
    sqlite3 *db;
    sqlite3_stmt *stmt;

    g_debug("backend: initialize");
    curl_global_init(CURL_GLOBAL_DEFAULT);
    /* Open the database. We will need it to save the time the configuration file was last modified. */
    path = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "metadata", "metadata.db", nullptr);
    if (sqlite3_open(path, &db) != SQLITE_OK)
    {
        /* g_error() aborts, so no cleanup is needed on this path. */
        g_error("%s: %s", path, sqlite3_errmsg(db));
    }
    g_free(path);
    /* Read the configuration file */
    key_conf = g_key_file_new();
    path = g_build_filename(SYSCONFDIR, "PackageKit", "Slackware.conf", nullptr);
    g_key_file_load_from_file(key_conf, path, G_KEY_FILE_NONE, &err);
    if (err)
    {
        g_error("%s: %s", path, err->message);
        g_error_free(err);
    }
    conf_file = g_file_new_for_path(path);
    if (!(file_info = g_file_query_info(conf_file,
                                        "time::modified-usec",
                                        G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
                                        nullptr,
                                        &err)))
    {
        g_error("%s", err->message);
        g_error_free(err);
    }
    /* Store the config file's mtime; pk_backend_refresh_cache() compares
     * against it to decide whether a full refresh is needed. */
    if ((ret = sqlite3_prepare_v2(db,
                                  "UPDATE cache_info SET value = ? WHERE key LIKE 'last_modification'",
                                  -1,
                                  &stmt,
                                  nullptr)) == SQLITE_OK) {
        ret = sqlite3_bind_int(stmt, 1, g_file_info_get_attribute_uint32(file_info, "time::modified-usec"));
        if (ret == SQLITE_OK)
        {
            ret = sqlite3_step(stmt);
        }
        sqlite3_finalize(stmt);
    }
    if ((ret != SQLITE_OK) && (ret != SQLITE_DONE))
    {
        g_error("%s: %s", path, sqlite3_errstr(ret));
    }
    else if (!sqlite3_changes(db))
    {
        g_error("Failed to update database: %s", path);
    }
    g_object_unref(file_info);
    g_object_unref(conf_file);
    sqlite3_close_v2(db);
    g_free(path);
    /* Initialize an object for each well-formed repository */
    groups = g_key_file_get_groups(key_conf, &groups_len);
    for (i = 0; i < groups_len; i++)
    {
        char *blacklist = g_key_file_get_string(key_conf, groups[i], "Blacklist", nullptr);
        char *mirror = g_key_file_get_string(key_conf, groups[i], "Mirror", nullptr);

        /* A "Priority" key marks a slackpkg-style repository, an
         * "IndexFile" key a download-list repository; group order
         * (i + 1) doubles as the repository priority order. */
        if (g_key_file_has_key(key_conf, groups[i], "Priority", nullptr))
        {
            repo = new Slackpkg (groups[i], mirror, i + 1, blacklist,
                                 g_key_file_get_string_list(key_conf, groups[i], "Priority", nullptr, nullptr));
        }
        else if (g_key_file_has_key(key_conf, groups[i], "IndexFile", nullptr))
        {
            repo = new Dl (groups[i], mirror, i + 1, blacklist,
                           g_key_file_get_string(key_conf, groups[i], "IndexFile", nullptr));
        }
        if (repo)
        {
            /* The repository object takes ownership of groups[i]. */
            repos = g_slist_append(repos, repo);
        }
        else
        {
            g_free(groups[i]);
        }
        g_free(mirror);
        g_free(blacklist);
    }
    g_free(groups);
    g_key_file_free(key_conf);
}
void
pk_backend_destroy()
{
g_debug("backend: destroy");
for (GSList *l = repos; l; l = g_slist_next (l))
{
delete static_cast<Pkgtools *> (l->data);
}
g_slist_free (repos);
curl_global_cleanup ();
}
/**
 * pk_backend_start_job:
 * @job: A #PkBackendJob.
 *
 * Per-job setup: open the metadata database and attach a #JobData to the
 * job. On failure the job gets an error code and no user data is attached.
 **/
void
pk_backend_start_job(PkBackendJob *job)
{
    char *db_filename = nullptr;
    JobData *job_data = g_new0(JobData, 1);

    db_filename = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "metadata", "metadata.db", nullptr);
    if (sqlite3_open(db_filename, &job_data->db) == SQLITE_OK) { /* Some SQLite settings */
        sqlite3_exec(job_data->db, "PRAGMA foreign_keys = ON", nullptr, nullptr, nullptr);
    }
    else
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_NO_CACHE,
                                  "%s: %s",
                                  db_filename,
                                  sqlite3_errmsg(job_data->db));
        /* sqlite3_open() allocates a handle even on failure; release it and
         * the JobData that will never be attached to the job, otherwise
         * both leak on every failed job start. */
        sqlite3_close(job_data->db);
        g_free(job_data);
        goto out;
    }
    pk_backend_job_set_user_data(job, job_data);
out:
    g_free(db_filename);
}
/**
 * pk_backend_stop_job:
 * @job: A #PkBackendJob.
 *
 * Per-job teardown: release the curl handle and database connection held
 * in the job's #JobData and detach it from the job.
 **/
void
pk_backend_stop_job(PkBackendJob *job)
{
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));
    /* pk_backend_start_job() attaches no data when opening the database
     * fails — guard against dereferencing a null JobData here. */
    if (!job_data)
    {
        return;
    }
    if (job_data->curl)
    {
        curl_easy_cleanup(job_data->curl);
    }
    sqlite3_close(job_data->db);
    g_free(job_data);
    pk_backend_job_set_user_data(job, nullptr);
}
/**
 * pk_backend_search_names:
 * @job: A #PkBackendJob.
 * @values: Search patterns (delivered to the thread via the job's params).
 *
 * Schedule a threaded search over package names. The "name" tag is passed
 * as user data; presumably it selects the database column searched by
 * pk_backend_search_thread() (defined elsewhere) — verify there.
 **/
void
pk_backend_search_names(PkBackendJob *job, char **values)
{
    pk_backend_job_thread_create(job, pk_backend_search_thread, (void *) "name", nullptr);
}
/**
 * pk_backend_search_details:
 * @job: A #PkBackendJob.
 * @values: Search patterns (delivered to the thread via the job's params).
 *
 * Schedule a threaded search over package descriptions; the "desc" tag is
 * handed to pk_backend_search_thread() as user data.
 **/
void
pk_backend_search_details(PkBackendJob *job, char **values)
{
    pk_backend_job_thread_create(job, pk_backend_search_thread, (void *) "desc", nullptr);
}
/**
 * pk_backend_search_groups:
 * @job: A #PkBackendJob.
 * @values: Search patterns (delivered to the thread via the job's params).
 *
 * Schedule a threaded search over package categories; the "cat" tag is
 * handed to pk_backend_search_thread() as user data.
 **/
void
pk_backend_search_groups(PkBackendJob *job, char **values)
{
    pk_backend_job_thread_create(job, pk_backend_search_thread, (void *) "cat", nullptr);
}
/**
 * pk_backend_search_files_thread:
 * @job: A #PkBackendJob.
 * @params: Job parameters; "(t^a&s)" — a bitfield (ignored) and the
 *          search patterns.
 * @user_data: Unused.
 *
 * Search the filelist table for packages containing files matching the
 * joined patterns and emit each match with its installed/available state.
 **/
static void
pk_backend_search_files_thread(PkBackendJob *job, GVariant *params, void *user_data)
{
    char **vals, *search;
    char *query;
    sqlite3_stmt *stmt;
    Info ret;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    pk_backend_job_set_percentage(job, 0);
    g_variant_get(params, "(t^a&s)", nullptr, &vals);
    /* Join all patterns with the SQL wildcard so one LIKE matches them all */
    search = g_strjoinv("%", vals);
    /* "^a&s" borrows the strings from @params; only the array is ours */
    g_free(vals);
    query = sqlite3_mprintf("SELECT (p.name || ';' || p.ver || ';' || p.arch || ';' || r.repo), p.summary, "
                            "p.full_name FROM filelist AS f NATURAL JOIN pkglist AS p NATURAL JOIN repos AS r "
                            "WHERE f.filename LIKE '%%%q%%' GROUP BY f.full_name", search);
    if ((sqlite3_prepare_v2(job_data->db, query, -1, &stmt, nullptr) == SQLITE_OK))
    {
        /* Now we're ready to output all packages */
        while (sqlite3_step(stmt) == SQLITE_ROW)
        {
            ret = is_installed((char*) sqlite3_column_text(stmt, 2));
            if ((ret == Info::installed) || (ret == Info::updating))
            {
                pk_backend_job_package(job, PK_INFO_ENUM_INSTALLED,
                                       (char*) sqlite3_column_text(stmt, 0),
                                       (char*) sqlite3_column_text(stmt, 1));
            }
            /* Was `ret == PK_INFO_ENUM_INSTALLING`: comparing an Info value
             * against a PkInfoEnum constant; use Info::installing as the
             * collection-handling code does. */
            else if (ret == Info::installing)
            {
                pk_backend_job_package(job, PK_INFO_ENUM_AVAILABLE,
                                       (char*) sqlite3_column_text(stmt, 0),
                                       (char*) sqlite3_column_text(stmt, 1));
            }
        }
        sqlite3_finalize(stmt);
    }
    else
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
    }
    sqlite3_free(query);
    g_free(search);
    pk_backend_job_set_percentage(job, 100);
}
/**
 * pk_backend_search_files:
 * @job: A #PkBackendJob.
 * @values: Search patterns (delivered to the thread via the job's params).
 *
 * Schedule a threaded search over the filelist table.
 **/
void
pk_backend_search_files(PkBackendJob *job, char **values)
{
    pk_backend_job_thread_create(job, pk_backend_search_files_thread, nullptr, nullptr);
}
/**
 * pk_backend_get_details_thread:
 * @job: A #PkBackendJob.
 * @pkg_ids: Package ids; only the first entry is looked up.
 * @user_data: Unused.
 *
 * Look the package up in the cache, extract a homepage URL out of its
 * description with a regular expression, and emit the details record.
 **/
static void
pk_backend_get_details_thread(PkBackendJob *job, char **pkg_ids, void *user_data)
{
    char *homepage = nullptr;
    char** tokens;
    gsize i;
    GString *desc;
    GRegex *expr;
    GMatchInfo *match_info;
    GError *err = nullptr;
    sqlite3_stmt *stmt;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT p.desc, p.cat, p.uncompressed FROM pkglist AS p NATURAL JOIN repos AS r "
                            "WHERE name LIKE @name AND r.repo LIKE @repo AND ext NOT LIKE 'obsolete'",
                            -1,
                            &stmt,
                            nullptr) != SQLITE_OK)) {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    tokens = pk_package_id_split(pkg_ids[0]);
    sqlite3_bind_text(stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
    sqlite3_bind_text(stmt, 2, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
    g_strfreev(tokens);
    if (sqlite3_step(stmt) != SQLITE_ROW)
        goto out;
    desc = g_string_new((char *) sqlite3_column_text(stmt, 0));
    /* Regular expression for searching a homepage */
    expr = g_regex_new("(?:http|ftp):\\/\\/[[:word:]\\/\\-\\.]+[[:word:]\\/](?=\\.?$)",
                       (GRegexCompileFlags)(G_REGEX_OPTIMIZE | G_REGEX_DUPNAMES),
                       (GRegexMatchFlags)(0),
                       &err);
    if (err)
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_UNKNOWN, "%s", err->message);
        g_error_free(err);
        /* Fix: @desc was leaked on this error path before */
        g_string_free(desc, true);
        goto out;
    }
    if (g_regex_match(expr, desc->str, (GRegexMatchFlags)0, &match_info))
    {
        homepage = g_match_info_fetch(match_info, 0); /* URL */
        /* Remove the last sentence with the copied URL */
        for (i = desc->len - 1; i > 0; i--)
        {
            if ((desc->str[i - 1] == '.') && (desc->str[i] == ' '))
            {
                g_string_truncate(desc, i);
                break;
            }
        }
        g_match_info_free(match_info);
    }
    g_regex_unref(expr);
    /* Ready */
    pk_backend_job_details(job,
                           pkg_ids[0],
                           nullptr,
                           nullptr,
                           pk_group_enum_from_string((char *) sqlite3_column_text(stmt, 1)),
                           desc->str,
                           homepage,
                           sqlite3_column_int(stmt, 2),
                           G_MAXUINT64);
    g_free(homepage);
    /* @desc is always non-null here; the former `if (desc)` was dead */
    g_string_free(desc, true);
out:
    sqlite3_finalize(stmt);
}
/**
 * pk_backend_get_details:
 * @job: A #PkBackendJob.
 * @package_ids: Package ids (delivered to the thread via the job's params).
 *
 * Schedule a threaded details lookup.
 **/
void
pk_backend_get_details(PkBackendJob *job, char **package_ids)
{
    pk_backend_job_thread_create(job, pk_backend_get_details_thread, nullptr, nullptr);
}
/**
 * pk_backend_resolve_thread:
 * @job: A #PkBackendJob.
 * @params: Job parameters; "(t^a&s)" — a bitfield (ignored) and the
 *          package names to resolve.
 * @user_data: Unused.
 *
 * Resolve each name against the cache, preferring the repository with the
 * lowest order when several carry the same package, and emit each match
 * with its installed/available state.
 **/
static void
pk_backend_resolve_thread(PkBackendJob *job, GVariant *params, void *user_data)
{
    char **vals, **val;
    sqlite3_stmt *stmt;
    Info ret;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    pk_backend_job_set_percentage(job, 0);
    g_variant_get(params, "(t^a&s)", nullptr, &vals);
    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT (p1.name || ';' || p1.ver || ';' || p1.arch || ';' || r.repo), p1.summary, "
                            "p1.full_name FROM pkglist AS p1 NATURAL JOIN repos AS r "
                            "WHERE p1.name LIKE @search AND p1.repo_order = "
                            "(SELECT MIN(p2.repo_order) FROM pkglist AS p2 WHERE p2.name = p1.name GROUP BY p2.name)",
                            -1,
                            &stmt,
                            nullptr) == SQLITE_OK)) {
        /* Output packages matching each pattern */
        for (val = vals; *val; val++)
        {
            sqlite3_bind_text(stmt, 1, *val, -1, SQLITE_TRANSIENT);
            while (sqlite3_step(stmt) == SQLITE_ROW)
            {
                /* Fix: @ret was read uninitialized — the is_installed()
                 * call present in the other search threads was missing. */
                ret = is_installed((char*) sqlite3_column_text(stmt, 2));
                if ((ret == Info::installed) || (ret == Info::updating))
                {
                    pk_backend_job_package(job, PK_INFO_ENUM_INSTALLED,
                                           (char*) sqlite3_column_text(stmt, 0),
                                           (char*) sqlite3_column_text(stmt, 1));
                }
                else if (ret == Info::installing)
                {
                    pk_backend_job_package(job, PK_INFO_ENUM_AVAILABLE,
                                           (char*) sqlite3_column_text(stmt, 0),
                                           (char*) sqlite3_column_text(stmt, 1));
                }
            }
            sqlite3_clear_bindings(stmt);
            sqlite3_reset(stmt);
        }
        sqlite3_finalize(stmt);
    } else {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
    }
    /* "^a&s" borrows the strings from @params; only the array is ours */
    g_free(vals);
    pk_backend_job_set_percentage(job, 100);
}
/**
 * pk_backend_resolve:
 * @job: A #PkBackendJob.
 * @packages: Package names (delivered to the thread via the job's params).
 *
 * Schedule a threaded package-name resolution.
 **/
void
pk_backend_resolve(PkBackendJob *job, char **packages)
{
    pk_backend_job_thread_create(job, pk_backend_resolve_thread, nullptr, nullptr);
}
/**
 * pk_backend_download_packages_thread:
 * @job: A #PkBackendJob.
 * @params: Job parameters; "(^a&ss)" — package ids and a destination
 *          directory.
 * @user_data: Unused.
 *
 * Download each requested package into @dir_path through its repository
 * object and report the resulting file back to the job.
 **/
static void
pk_backend_download_packages_thread(PkBackendJob *job, GVariant *params, void *user_data)
{
    char *dir_path, *path, **pkg_ids, *to_strv[] = {nullptr, nullptr};
    unsigned i;
    sqlite3_stmt *stmt;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    g_variant_get(params, "(^a&ss)", &pkg_ids, &dir_path);
    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT summary, (full_name || '.' || ext) FROM pkglist NATURAL JOIN repos "
                            "WHERE name LIKE @name AND ver LIKE @ver AND arch LIKE @arch AND repo LIKE @repo",
                            -1,
                            &stmt,
                            nullptr) != SQLITE_OK))
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    for (i = 0; pkg_ids[i]; ++i)
    {
        char **tokens = pk_package_id_split(pkg_ids[i]);
        sqlite3_bind_text(stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(stmt, 2, tokens[PK_PACKAGE_ID_VERSION], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(stmt, 3, tokens[PK_PACKAGE_ID_ARCH], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(stmt, 4, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
        if (sqlite3_step(stmt) == SQLITE_ROW)
        {
            GSList *repo;
            /* Only download through a repository we actually know about */
            if ((repo = g_slist_find_custom(repos, tokens[PK_PACKAGE_ID_DATA], cmp_repo)))
            {
                pk_backend_job_package(job, PK_INFO_ENUM_DOWNLOADING,
                                       pkg_ids[i],
                                       (char *) sqlite3_column_text(stmt, 0));
                static_cast<Pkgtools *> (repo->data)->download (job,
                    dir_path, tokens[PK_PACKAGE_ID_NAME]);
                path = g_build_filename(dir_path, (char *) sqlite3_column_text(stmt, 1), nullptr);
                to_strv[0] = path;
                pk_backend_job_files(job, nullptr, to_strv);
                g_free(path);
            }
        }
        sqlite3_clear_bindings(stmt);
        sqlite3_reset(stmt);
        g_strfreev(tokens);
    }
out:
    sqlite3_finalize(stmt);
    /* Fix leaks: "^a&s" hands over the array (strings stay owned by
     * @params) and "s" hands over a newly-allocated string. */
    g_free(pkg_ids);
    g_free(dir_path);
}
/**
 * pk_backend_download_packages:
 * @job: A #PkBackendJob.
 * @package_ids: Package ids (delivered to the thread via the job's params).
 * @directory: Destination directory (delivered via the job's params).
 *
 * Schedule a threaded package download.
 **/
void
pk_backend_download_packages(PkBackendJob *job, char **package_ids, const char *directory)
{
    pk_backend_job_thread_create(job, pk_backend_download_packages_thread, nullptr, nullptr);
}
/**
 * pk_backend_install_packages_thread:
 * @job: A #PkBackendJob.
 * @pkg_ids: Package ids to install.
 * @user_data: Unused.
 *
 * Build the list of packages to install — expanding collections into their
 * not-yet-installed members — then download and install each one through
 * its repository object.
 **/
static void
pk_backend_install_packages_thread(PkBackendJob *job, char **pkg_ids, void *user_data)
{
    char *dest_dir_name;
    unsigned i;
    gdouble percent_step;
    GSList *install_list = nullptr, *l;
    sqlite3_stmt *pkglist_stmt = nullptr, *collection_stmt = nullptr;
    /* Fix: was declared PkInfoEnum although is_installed() returns Info
     * and the comparisons below use Info:: constants (cf. the search
     * threads). */
    Info ret;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT summary, cat FROM pkglist NATURAL JOIN repos "
                            "WHERE name LIKE @name AND ver LIKE @ver AND arch LIKE @arch AND repo LIKE @repo",
                            -1,
                            &pkglist_stmt,
                            nullptr) != SQLITE_OK) ||
        (sqlite3_prepare_v2(job_data->db,
                            "SELECT (c.collection_pkg || ';' || p.ver || ';' || p.arch || ';' || r.repo), p.summary, "
                            "p.full_name, p.ext FROM collections AS c "
                            "JOIN pkglist AS p ON c.collection_pkg = p.name "
                            "JOIN repos AS r ON p.repo_order = r.repo_order "
                            "WHERE c.name LIKE @name AND r.repo LIKE @repo",
                            -1,
                            &collection_stmt,
                            nullptr) != SQLITE_OK))
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    for (i = 0; pkg_ids[i]; i++)
    {
        char **tokens = pk_package_id_split(pkg_ids[i]);
        sqlite3_bind_text(pkglist_stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(pkglist_stmt, 2, tokens[PK_PACKAGE_ID_VERSION], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(pkglist_stmt, 3, tokens[PK_PACKAGE_ID_ARCH], -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(pkglist_stmt, 4, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
        if (sqlite3_step(pkglist_stmt) == SQLITE_ROW)
        {
            /* If it isn't a collection (column 1 is the category) */
            if (g_strcmp0((char *) sqlite3_column_text(pkglist_stmt, 1), "collections"))
            {
                install_list = g_slist_append(install_list, g_strdup(pkg_ids[i]));
            }
            else
            {
                /* Expand the collection, keeping only members that still
                 * need to be installed or updated */
                sqlite3_bind_text(collection_stmt, 1, tokens[PK_PACKAGE_ID_NAME], -1, SQLITE_TRANSIENT);
                sqlite3_bind_text(collection_stmt, 2, tokens[PK_PACKAGE_ID_DATA], -1, SQLITE_TRANSIENT);
                while (sqlite3_step(collection_stmt) == SQLITE_ROW)
                {
                    ret = is_installed((char*) sqlite3_column_text(collection_stmt, 2));
                    if ((ret == Info::installing) || (ret == Info::updating))
                    {
                        install_list = g_slist_append(install_list,
                                                      g_strdup((char *) sqlite3_column_text(collection_stmt, 0)));
                    }
                }
                sqlite3_clear_bindings(collection_stmt);
                sqlite3_reset(collection_stmt);
            }
        }
        sqlite3_clear_bindings(pkglist_stmt);
        sqlite3_reset(pkglist_stmt);
        g_strfreev(tokens);
    }
    if (install_list)
    {
        /* / 2 means total percentage for installing and for downloading */
        percent_step = 100.0 / g_slist_length(install_list) / 2;
        /* Download the packages */
        dest_dir_name = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "downloads", nullptr);
        for (l = install_list, i = 0; l; l = g_slist_next(l), i++)
        {
            char **tokens;
            GSList *repo;
            pk_backend_job_set_percentage(job, percent_step * i);
            tokens = pk_package_id_split((char *)(l->data));
            repo = g_slist_find_custom(repos, tokens[PK_PACKAGE_ID_DATA], cmp_repo);
            if (repo)
            {
                static_cast<Pkgtools *> (repo->data)->download (job,
                    dest_dir_name, tokens[PK_PACKAGE_ID_NAME]);
            }
            g_strfreev(tokens);
        }
        g_free(dest_dir_name);
        /* Install the packages; @i deliberately keeps counting so the
         * percentage climbs from 50 to 100 during this second half */
        for (l = install_list; l; l = g_slist_next(l), i++)
        {
            char **tokens;
            GSList *repo;
            pk_backend_job_set_percentage(job, percent_step * i);
            tokens = pk_package_id_split((char *)(l->data));
            repo = g_slist_find_custom(repos, tokens[PK_PACKAGE_ID_DATA], cmp_repo);
            if (repo)
            {
                static_cast<Pkgtools *> (repo->data)->install (job, tokens[PK_PACKAGE_ID_NAME]);
            }
            g_strfreev(tokens);
        }
    }
    g_slist_free_full(install_list, g_free);
out:
    sqlite3_finalize(pkglist_stmt);
    sqlite3_finalize(collection_stmt);
}
/**
 * pk_backend_install_packages:
 * @job: A #PkBackendJob.
 * @package_ids: Package ids (delivered to the thread via the job's params).
 *
 * Schedule a threaded package installation.
 **/
void
pk_backend_install_packages(PkBackendJob *job, char **package_ids)
{
    pk_backend_job_thread_create(job, pk_backend_install_packages_thread, nullptr, nullptr);
}
/**
 * pk_backend_remove_packages_thread:
 * @job: A #PkBackendJob.
 * @pkg_ids: Package ids to remove.
 *
 * Remove each package by spawning /sbin/removepkg, updating the job
 * percentage as it goes. Stops at the first spawn error.
 **/
static void
pk_backend_remove_packages_thread(PkBackendJob* job, char **pkg_ids)
{
    char *cmd_line;
    unsigned i;
    gdouble percent_step;
    GError *err = nullptr;

    /* Add percent_step percents per removed package */
    percent_step = 100.0 / g_strv_length(pkg_ids);
    for (i = 0; pkg_ids[i]; i++)
    {
        char **tokens;
        pk_backend_job_set_percentage(job, percent_step * i);
        tokens = pk_package_id_split(pkg_ids[i]);
        cmd_line = g_strconcat("/sbin/removepkg ", tokens[PK_PACKAGE_ID_NAME], nullptr);
        /* Pkgtools return always 0, so only spawn failures are detectable */
        g_spawn_command_line_sync(cmd_line, nullptr, nullptr, nullptr, &err);
        g_free(cmd_line);
        g_strfreev(tokens);
        if (err)
        {
            pk_backend_job_error_code(job, PK_ERROR_ENUM_PACKAGE_FAILED_TO_REMOVE, "%s", err->message);
            g_error_free(err);
            return;
        }
    }
    /* Fix: completion was reported inside the loop after every single
     * package; report 100% once, after all removals succeeded. */
    pk_backend_job_set_percentage(job, 100);
}
/**
 * pk_backend_remove_packages:
 * @job: A #PkBackendJob.
 * @package_ids: Package ids (delivered to the thread via the job's params).
 *
 * Schedule a threaded package removal.
 **/
void
pk_backend_remove_packages(PkBackendJob *job, char **package_ids)
{
    pk_backend_job_thread_create(job, pk_backend_remove_packages_thread, nullptr, nullptr);
}
/**
 * pk_backend_get_updates_thread:
 * @job: A #PkBackendJob.
 * @user_data: Unused.
 *
 * Compare every installed package (enumerated from /var/log/packages)
 * against the cache and emit packages that are obsolete or have a newer
 * version available.
 **/
static void
pk_backend_get_updates_thread(PkBackendJob *job, void *user_data)
{
    char *pkg_id, *full_name, *desc;
    const char *pkg_metadata_filename;
    GFile *pkg_metadata_dir;
    GFileEnumerator *pkg_metadata_enumerator;
    GFileInfo *pkg_metadata_file_info;
    GError *err = nullptr;
    sqlite3_stmt *stmt;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));

    if ((sqlite3_prepare_v2(job_data->db,
                            "SELECT p1.full_name, p1.name, p1.ver, p1.arch, r.repo, p1.summary, p1.ext "
                            "FROM pkglist AS p1 NATURAL JOIN repos AS r "
                            "WHERE p1.name LIKE @name AND p1.repo_order = "
                            "(SELECT MIN(p2.repo_order) FROM pkglist AS p2 WHERE p2.name = p1.name GROUP BY p2.name)",
                            -1,
                            &stmt,
                            nullptr) != SQLITE_OK))
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_CANNOT_GET_FILELIST, "%s", sqlite3_errmsg(job_data->db));
        goto out;
    }
    /* Read the package metadata directory and compare all installed packages with ones in the cache */
    pkg_metadata_dir = g_file_new_for_path("/var/log/packages");
    pkg_metadata_enumerator = g_file_enumerate_children(pkg_metadata_dir, "standard::name",
                                                        G_FILE_QUERY_INFO_NONE,
                                                        nullptr,
                                                        &err);
    g_object_unref(pkg_metadata_dir);
    if (err)
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_NO_CACHE, "/var/log/packages: %s", err->message);
        g_error_free(err);
        goto out;
    }
    while ((pkg_metadata_file_info = g_file_enumerator_next_file(pkg_metadata_enumerator, nullptr, nullptr)))
    {
        char **tokens;
        pkg_metadata_filename = g_file_info_get_name(pkg_metadata_file_info);
        tokens = split_package_name(pkg_metadata_filename);
        /* Select the package from the database */
        sqlite3_bind_text(stmt, 1, tokens[0], -1, SQLITE_TRANSIENT);
        /* If there are more packages with the same name, remember the one from the
         * repository with the lowest order.
         * Fix: was `||`, which skipped the repository check whenever a row
         * matched and read columns of a row-less statement otherwise; the
         * package must both exist in the cache AND come from a known repo. */
        if ((sqlite3_step(stmt) == SQLITE_ROW)
            && g_slist_find_custom(repos, ((char *) sqlite3_column_text(stmt, 4)), cmp_repo))
        {
            full_name = g_strdup((char *) sqlite3_column_text(stmt, 0));
            if (!g_strcmp0((char *) sqlite3_column_text(stmt, 6), "obsolete"))
            { /* Remove if obsolete */
                pkg_id = pk_package_id_build(tokens[PK_PACKAGE_ID_NAME],
                                             tokens[PK_PACKAGE_ID_VERSION],
                                             tokens[PK_PACKAGE_ID_ARCH],
                                             "obsolete");
                /* TODO:
                 * 1: Use the repository name instead of "obsolete" above and check in pk_backend_update_packages()
                      if the package is obsolete or not
                 * 2: Get description from /var/log/packages, not from the database */
                desc = g_strdup((char *) sqlite3_column_text(stmt, 5));
                pk_backend_job_package(job, PK_INFO_ENUM_REMOVING, pkg_id, desc);
                g_free(desc);
                g_free(pkg_id);
            }
            else if (g_strcmp0(pkg_metadata_filename, full_name))
            { /* Update available */
                pkg_id = pk_package_id_build((char *) sqlite3_column_text(stmt, 1),
                                             (char *) sqlite3_column_text(stmt, 2),
                                             (char *) sqlite3_column_text(stmt, 3),
                                             (char *) sqlite3_column_text(stmt, 4));
                desc = g_strdup((char *) sqlite3_column_text(stmt, 5));
                pk_backend_job_package(job, PK_INFO_ENUM_NORMAL, pkg_id, desc);
                g_free(desc);
                g_free(pkg_id);
            }
            g_free(full_name);
        }
        sqlite3_clear_bindings(stmt);
        sqlite3_reset(stmt);
        g_strfreev(tokens);
        g_object_unref(pkg_metadata_file_info);
    }
    g_object_unref(pkg_metadata_enumerator);
out:
    sqlite3_finalize(stmt);
}
/**
 * pk_backend_get_updates:
 * @job: A #PkBackendJob.
 *
 * Schedule a threaded update check.
 **/
void
pk_backend_get_updates(PkBackendJob *job)
{
    pk_backend_job_thread_create(job, pk_backend_get_updates_thread, nullptr, nullptr);
}
/**
 * pk_backend_update_packages_thread:
 * @job: A #PkBackendJob.
 * @pkg_ids: Package ids to update.
 * @user_data: Unused.
 *
 * Two passes over @pkg_ids: first fetch every non-obsolete package into the
 * download cache, then install them; packages whose repository field reads
 * "obsolete" are removed instead.
 **/
static void
pk_backend_update_packages_thread(PkBackendJob *job, char **pkg_ids, void *user_data)
{
    /* Pass 1: download everything that is not obsolete */
    char *cache_dir = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "downloads", nullptr);
    for (unsigned j = 0; pkg_ids[j]; j++)
    {
        char **parts = pk_package_id_split(pkg_ids[j]);
        if (g_strcmp0(parts[PK_PACKAGE_ID_DATA], "obsolete") != 0)
        {
            GSList *found = g_slist_find_custom(repos, parts[PK_PACKAGE_ID_DATA], cmp_repo);
            if (found)
            {
                static_cast<Pkgtools *> (found->data)->download (job,
                    cache_dir, parts[PK_PACKAGE_ID_NAME]);
            }
        }
        g_strfreev(parts);
    }
    g_free(cache_dir);
    /* Pass 2: install the downloaded packages, remove the obsolete ones */
    for (unsigned j = 0; pkg_ids[j]; j++)
    {
        char **parts = pk_package_id_split(pkg_ids[j]);
        if (g_strcmp0(parts[PK_PACKAGE_ID_DATA], "obsolete") != 0)
        {
            GSList *found = g_slist_find_custom(repos, parts[PK_PACKAGE_ID_DATA], cmp_repo);
            if (found)
            {
                static_cast<Pkgtools *> (found->data)->install (job,
                    parts[PK_PACKAGE_ID_NAME]);
            }
        }
        else
        {
            /* Remove obsolete package
             * TODO: Removing should be an independent operation (not during installing updates) */
            char *removepkg_cmd = g_strconcat("/sbin/removepkg ", parts[PK_PACKAGE_ID_NAME], nullptr);
            g_spawn_command_line_sync(removepkg_cmd, nullptr, nullptr, nullptr, nullptr);
            g_free(removepkg_cmd);
        }
        g_strfreev(parts);
    }
}
/**
 * pk_backend_update_packages:
 * @job: A #PkBackendJob.
 * @package_ids: Package ids (delivered to the thread via the job's params).
 *
 * Schedule a threaded package update.
 **/
void
pk_backend_update_packages(PkBackendJob *job, char **package_ids)
{
    pk_backend_job_thread_create(job, pk_backend_update_packages_thread, nullptr, nullptr);
}
/**
 * pk_backend_refresh_cache_thread:
 * @job: A #PkBackendJob.
 * @force: Whether to rebuild the whole metadata cache unconditionally.
 * @user_data: Unused.
 *
 * Refresh the metadata cache: decide whether a full rebuild is needed,
 * optionally wipe the repos table, then let every repository object list,
 * download and process its index files in a temporary directory.
 **/
static void
pk_backend_refresh_cache_thread(PkBackendJob *job, bool force, void *user_data)
{
    char *tmp_dir_name, *db_err, *path = nullptr;
    int ret;
    GSList *file_list = nullptr;
    GFile *db_file = nullptr;
    GFileInfo *file_info = nullptr;
    GError *err = nullptr;
    sqlite3_stmt *stmt = nullptr;
    auto job_data = static_cast<JobData *> (pk_backend_job_get_user_data(job));
    /* Create temporary directory */
    tmp_dir_name = g_dir_make_tmp("PackageKit.XXXXXX", &err);
    if (!tmp_dir_name)
    {
        pk_backend_job_error_code(job, PK_ERROR_ENUM_INTERNAL_ERROR, "%s", err->message);
        g_error_free(err);
        return;
    }
    /* Force the complete cache refresh if the read configuration file is newer than the metadata cache */
    if (!force)
    {
        path = g_build_filename(LOCALSTATEDIR, "cache", "PackageKit", "metadata", "metadata.db", nullptr);
        db_file = g_file_new_for_path(path);
        file_info = g_file_query_info(db_file, "time::modified-usec", G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, nullptr, &err);
        if (err)
        {
            pk_backend_job_error_code(job, PK_ERROR_ENUM_NO_CACHE, "%s: %s", path, err->message);
            g_error_free(err);
            goto out;
        }
        /* cache_info.last_modification is written at backend init with the
         * configuration file's timestamp */
        ret = sqlite3_prepare_v2(job_data->db,
                                 "SELECT value FROM cache_info WHERE key LIKE 'last_modification'",
                                 -1,
                                 &stmt,
                                 nullptr);
        if ((ret != SQLITE_OK) || ((ret = sqlite3_step(stmt)) != SQLITE_ROW))
        {
            pk_backend_job_error_code(job,
                                      PK_ERROR_ENUM_NO_CACHE,
                                      "%s: %s",
                                      path,
                                      sqlite3_errstr(ret));
            goto out;
        }
        /* NOTE(review): compares the stored timestamp against the database
         * file's own mtime — presumably "configuration changed since the
         * cache was built"; confirm against the init code's update. */
        if ((std::uint32_t) sqlite3_column_int(stmt, 0) > g_file_info_get_attribute_uint32(file_info, "time::modified-usec"))
        {
            force = true;
        }
    }
    if (force) /* It should empty all tables */
    {
        /* Cascading deletes (PRAGMA foreign_keys = ON) are expected to
         * clear the dependent tables along with repos */
        if (sqlite3_exec(job_data->db, "DELETE FROM repos", nullptr, 0, &db_err) != SQLITE_OK)
        {
            pk_backend_job_error_code(job, PK_ERROR_ENUM_INTERNAL_ERROR, "%s", db_err);
            sqlite3_free(db_err);
            goto out;
        }
    }
    // Get list of files that should be downloaded.
    for (GSList *l = repos; l; l = g_slist_next(l))
    {
        file_list = g_slist_concat(file_list,
                                   static_cast<Pkgtools *> (l->data)->collect_cache_info (tmp_dir_name));
    }
    /* Download repository index files; each list entry is a
     * {source URL, destination path} string vector */
    for (GSList *l = file_list; l; l = g_slist_next(l))
    {
        get_file(&job_data->curl, static_cast<char **> (l->data)[0],
                 static_cast<char **> (l->data)[1]);
    }
    g_slist_free_full(file_list, (GDestroyNotify)g_strfreev);
    /* Refresh cache */
    for (GSList *l = repos; l; l = g_slist_next(l))
    {
        static_cast<Pkgtools *> (l->data)->generate_cache (job, tmp_dir_name);
    }
out:
    /* Common cleanup for every exit path: statement, GFile objects,
     * path string and the temporary download directory */
    sqlite3_finalize(stmt);
    if (file_info)
    {
        g_object_unref(file_info);
    }
    if (db_file)
    {
        g_object_unref(db_file);
    }
    g_free(path);
    pk_directory_remove_contents(tmp_dir_name);
    g_rmdir(tmp_dir_name);
    g_free(tmp_dir_name);
}
/**
 * pk_backend_refresh_cache:
 * @job: A #PkBackendJob.
 * @force: Whether to rebuild the cache unconditionally.
 *
 * Schedule a threaded cache refresh.
 * NOTE(review): @force is not forwarded here — the thread reads its own
 * second argument, whose signature differs from PkBackendJobThreadFunc;
 * verify how the thread actually receives the flag.
 **/
void
pk_backend_refresh_cache(PkBackendJob *job, bool force)
{
    pk_backend_job_thread_create(job, pk_backend_refresh_cache_thread, nullptr, nullptr);
}
/**
 * pk_backend_get_update_detail_thread:
 * @job: A #PkBackendJob.
 * @pkg_ids: Package ids to describe.
 * @user_data: Unused.
 *
 * No per-update metadata is stored by this backend, so emit an empty,
 * stable detail record for every requested package id.
 **/
static void
pk_backend_get_update_detail_thread(PkBackendJob *job, char **pkg_ids, void *user_data)
{
    for (char **pkg_id = pkg_ids; *pkg_id != nullptr; pkg_id++)
    {
        pk_backend_job_update_detail (job,
                                      *pkg_id,
                                      nullptr,
                                      nullptr,
                                      nullptr,
                                      nullptr,
                                      nullptr,
                                      PK_RESTART_ENUM_NONE,
                                      nullptr,
                                      nullptr,
                                      PK_UPDATE_STATE_ENUM_STABLE,
                                      nullptr,
                                      nullptr);
    }
}
/**
 * pk_backend_get_update_detail:
 * @job: A #PkBackendJob.
 * @package_ids: Package ids (delivered to the thread via the job's params).
 *
 * Schedule a threaded update-detail lookup.
 **/
void
pk_backend_get_update_detail(PkBackendJob *job, char **package_ids)
{
    pk_backend_job_thread_create(job, pk_backend_get_update_detail_thread, nullptr, nullptr);
}
+8 -8
View File
@@ -7,10 +7,10 @@
#include <sqlite3.h> #include <sqlite3.h>
#include "pkgtools.h" #include "pkgtools.h"
namespace slack { namespace katja
{
/** /**
* slack::Pkgtools::download: * katja::Pkgtools::download:
* @job: A #PkBackendJob. * @job: A #PkBackendJob.
* @dest_dir_name: Destination directory. * @dest_dir_name: Destination directory.
* @pkg_name: Package name. * @pkg_name: Package name.
@@ -73,7 +73,7 @@ Pkgtools::download (JobData *job_data,
} }
/** /**
* slack::Pkgtools::install: * katja::Pkgtools::install:
* @job_data: A #JobData. * @job_data: A #JobData.
* @pkg_name: Package name. * @pkg_name: Package name.
* *
@@ -120,7 +120,7 @@ Pkgtools::~Pkgtools () noexcept
} }
/** /**
* slack::Pkgtools::get_name: * katja::Pkgtools::get_name:
* *
* Retrieves the repository name. * Retrieves the repository name.
* *
@@ -133,7 +133,7 @@ Pkgtools::get_name () const noexcept
} }
/** /**
* slack::Pkgtools::get_mirror: * katja::Pkgtools::get_mirror:
* *
* Retrieves the repository mirror. * Retrieves the repository mirror.
* *
@@ -146,7 +146,7 @@ Pkgtools::get_mirror () const noexcept
} }
/** /**
* slack::Pkgtools::get_order: * katja::Pkgtools::get_order:
* *
* Retrieves the repository order. * Retrieves the repository order.
* *
@@ -159,7 +159,7 @@ Pkgtools::get_order () const noexcept
} }
/** /**
* slack::Pkgtools:is_blacklisted: * katja::Pkgtools:is_blacklisted:
* @pkg: Package name to check for. * @pkg: Package name to check for.
* *
* Checks whether a package is blacklisted. * Checks whether a package is blacklisted.
+2 -2
View File
@@ -9,8 +9,8 @@
#include <glib-object.h> #include <glib-object.h>
#include "utils.h" #include "utils.h"
namespace slack { namespace katja
{
class Pkgtools class Pkgtools
{ {
public: public:
+7 -7
View File
@@ -5,12 +5,12 @@
#include "slackpkg.h" #include "slackpkg.h"
#include "utils.h" #include "utils.h"
namespace slack { namespace katja
{
GHashTable *Slackpkg::cat_map = nullptr; GHashTable *Slackpkg::cat_map = nullptr;
/* /*
* slack::Slackpkg::manifest: * katja::Slackpkg::manifest:
* @job: a #PkBackendJob. * @job: a #PkBackendJob.
* @tmpl: temporary directory. * @tmpl: temporary directory.
* @filename: manifest filename * @filename: manifest filename
@@ -150,7 +150,7 @@ out:
} }
/** /**
* slack::Slackpkg::collect_cache_info: * katja::Slackpkg::collect_cache_info:
* @tmpl: temporary directory for downloading the files. * @tmpl: temporary directory for downloading the files.
* *
* Download files needed to get the information like the list of packages * Download files needed to get the information like the list of packages
@@ -228,7 +228,7 @@ out:
} }
/** /**
* slack::Slackpkg::generate_cache: * katja::Slackpkg::generate_cache:
* @job_data: A #JobData. * @job_data: A #JobData.
* @tmpl: temporary directory for downloading the files. * @tmpl: temporary directory for downloading the files.
* *
@@ -463,7 +463,7 @@ Slackpkg::~Slackpkg () noexcept
} }
/** /**
* slack::Slackpkg::Slackpkg: * katja::Slackpkg::Slackpkg:
* @name: Repository name. * @name: Repository name.
* @mirror: Repository mirror. * @mirror: Repository mirror.
* @order: Repository order. * @order: Repository order.
@@ -472,7 +472,7 @@ Slackpkg::~Slackpkg () noexcept
* *
* Constructor. * Constructor.
* *
* Returns: New #slack::Slackpkg. * Returns: New #katja::Slackpkg.
**/ **/
Slackpkg::Slackpkg (const char *name, const char *mirror, Slackpkg::Slackpkg (const char *name, const char *mirror,
std::uint8_t order, const char *blacklist, char **priority) noexcept std::uint8_t order, const char *blacklist, char **priority) noexcept
+2 -2
View File
@@ -10,8 +10,8 @@
#include "pkgtools.h" #include "pkgtools.h"
#include "utils.h" #include "utils.h"
namespace slack { namespace katja
{
class Slackpkg final : public Pkgtools class Slackpkg final : public Pkgtools
{ {
public: public:
+6 -6
View File
@@ -9,10 +9,10 @@
#include "utils.h" #include "utils.h"
#include "pkgtools.h" #include "pkgtools.h"
namespace slack { namespace katja
{
/** /**
* slack::get_file: * katja::get_file:
* @curl: curl easy handle. * @curl: curl easy handle.
* @source_url: source url. * @source_url: source url.
* @dest: destination. * @dest: destination.
@@ -73,7 +73,7 @@ get_file (CURL **curl, char *source_url, char *dest)
} }
/** /**
* slack::split_package_name: * katja::split_package_name:
* Got the name of a package, without version-arch-release data. * Got the name of a package, without version-arch-release data.
**/ **/
char ** char **
@@ -125,7 +125,7 @@ split_package_name (const char *pkg_filename)
} }
/** /**
* slack::is_installed: * katja::is_installed:
* Checks if a package is already installed in the system. * Checks if a package is already installed in the system.
* *
* Params: * Params:
@@ -222,7 +222,7 @@ is_installed (const char *pkg_fullname)
} }
/** /**
* slack::cmp_repo: * katja::cmp_repo:
**/ **/
int int
cmp_repo (const void *a, const void *b) cmp_repo (const void *a, const void *b)
+2 -6
View File
@@ -7,8 +7,8 @@
#include <curl/curl.h> #include <curl/curl.h>
namespace slack { namespace katja
{
struct JobData struct JobData
{ {
sqlite3 *db; sqlite3 *db;
@@ -33,10 +33,6 @@ char **split_package_name (const char *pkg_filename);
Info is_installed (const char *pkg_fullname); Info is_installed (const char *pkg_fullname);
extern "C" {
int cmp_repo (const void *a, const void *b); int cmp_repo (const void *a, const void *b);
} }
}