diff --git a/README.md b/README.md
index 65436571..83686ab7 100644
--- a/README.md
+++ b/README.md
@@ -94,31 +94,23 @@ debug('dat-related stuff')
 
 ### `dbs.archives`
 
-### `dbs.bookmarks`
-
 ### `dbs.history`
 
 ### `dbs.settings`
 
 ### `dbs.sitedata`
 
-### `dbs.templates`
-
 ### `dat`
 
 ### `dat.library`
 
 ### `dat.dns`
 
-### `dat.folderSync`
-
-### `dat.garbageCollector`
-
 ### `dat.protocol`
 
 ### `dat.debug`
 
-### `crawler`
+### `uwg`
 
 ### `users`
 
diff --git a/applications.js b/applications.js
deleted file mode 100644
index 654a0651..00000000
--- a/applications.js
+++ /dev/null
@@ -1,69 +0,0 @@
-const sessionPerms = require('./lib/session-perms')
-const knex = require('./lib/knex')
-const db = require('./dbs/profile-data-db')
-const sitedataDb = require('./dbs/sitedata')
-const dat = require('./dat')
-
-// typedefs
-// =
-
-/**
- * @typedef {Object} ApplicationPermission
- * @prop {string} id
- * @prop {string[]} caps
- * @prop {string} description
- *
- * @typedef {Object} ApplicationState
- * @prop {string} url
- * @prop {ApplicationPermission[]} permissions
- * @prop {boolean} installed
- * @prop {boolean} enabled
- * @prop {string} installedAt
- */
-
-// exported api
-// =
-
-/**
- * @param {Object} opts
- * @param {number} opts.userId
- * @param {string} opts.url
- * @returns {Promise}
- */
-exports.getApplicationState = async function ({userId, url}) {
-  url = await dat.library.getPrimaryUrl(url)
-  var record = await db.get(knex('installed_applications').where({userId, url}))
-  if (record) {
-    record.installed = true
-  } else {
-    record = {
-      url,
-      installed: false,
-      enabled: false,
-      installedAt: null
-    }
-  }
-  record.permissions = await sitedataDb.getAppPermissions(record.url)
-  return massageAppRecord(record)
-}
-
-// internal methods
-// =
-
-/**
- * @param {Object} record
- * @returns {ApplicationState}
- */
-function massageAppRecord (record) {
-  return {
-    url: record.url,
-    permissions: Object.entries(record.permissions).map(([id, caps]) => ({
-      id,
-      caps,
-      description: sessionPerms.describePerm(id, caps)
-    })),
-    installed: record.installed,
-    enabled: Boolean(record.enabled),
-    installedAt: record.createdAt ? (new Date(record.createdAt)).toISOString() : null
-  }
-}
\ No newline at end of file
diff --git a/crawler/discussions.js b/crawler/discussions.js
deleted file mode 100644
index f6b7e2e0..00000000
--- a/crawler/discussions.js
+++ /dev/null
@@ -1,418 +0,0 @@
-const assert = require('assert')
-const {URL} = require('url')
-const Events = require('events')
-const Ajv = require('ajv')
-const logger = require('../logger').child({category: 'crawler', dataset: 'discussions'})
-const db = require('../dbs/profile-data-db')
-const crawler = require('./index')
-const datLibrary = require('../dat/library')
-const lock = require('../lib/lock')
-const knex = require('../lib/knex')
-const siteDescriptions = require('./site-descriptions')
-const {doCrawl, doCheckpoint, emitProgressEvent, getMatchingChangesInOrder, generateTimeFilename, ensureDirectory} = require('./util')
-const discussionSchema = require('./json-schemas/discussion')
-
-// constants
-// =
-
-const TABLE_VERSION = 1
-const JSON_TYPE = 'unwalled.garden/discussion'
-const JSON_PATH_REGEX = /^\/data\/discussions\/([^/]+)\.json$/i
-
-// typedefs
-// =
-
-/**
- * @typedef {import('../dat/library').InternalDatArchive} InternalDatArchive
- * @typedef {import('./util').CrawlSourceRecord} CrawlSourceRecord
- * @typedef { import("./site-descriptions").SiteDescription } SiteDescription
- *
- * @typedef {Object} Discussion
- * @prop {string} pathname
- * @prop {string} title
- * @prop {string} body
- * @prop {string} href
- * @prop {string[]} tags
- * @prop {string} createdAt
- * @prop {string} updatedAt
- * @prop {SiteDescription} author
- * @prop {string} visibility
- */
-
-// globals
-// =
-
-const events = new Events()
-const ajv = (new Ajv())
-const validateDiscussion = ajv.compile(discussionSchema)
-
-// exported api
-// =
-
-exports.on = events.on.bind(events)
-exports.addListener = events.addListener.bind(events)
-exports.removeListener = events.removeListener.bind(events)
-
-/**
- * @description
- * Crawl the given site for discussions.
- *
- * @param {InternalDatArchive} archive - site to crawl.
- * @param {CrawlSourceRecord} crawlSource - internal metadata about the crawl target.
- * @returns {Promise}
- */
-exports.crawlSite = async function (archive, crawlSource) {
-  return doCrawl(archive, crawlSource, 'crawl_discussions', TABLE_VERSION, async ({changes, resetRequired}) => {
-    const supressEvents = resetRequired === true // dont emit when replaying old info
-    logger.silly('Crawling discussions', {details: {url: archive.url, numChanges: changes.length, resetRequired}})
-    if (resetRequired) {
-      // reset all data
-      logger.debug('Resetting dataset', {details: {url: archive.url}})
-      await db.run(`
-        DELETE FROM crawl_discussions WHERE crawlSourceId = ?
- `, [crawlSource.id]) - await doCheckpoint('crawl_discussions', TABLE_VERSION, crawlSource, 0) - } - - // collect changed discussions - var changedDiscussions = getMatchingChangesInOrder(changes, JSON_PATH_REGEX) - if (changedDiscussions.length) { - logger.verbose('Collected new/changed discussion files', {details: {url: archive.url, changedDiscussions: changedDiscussions.map(p => p.name)}}) - } else { - logger.debug('No new discussion-files found', {details: {url: archive.url}}) - } - emitProgressEvent(archive.url, 'crawl_discussions', 0, changedDiscussions.length) - - // read and apply each discussion in order - var progress = 0 - for (let changedDiscussion of changedDiscussions) { - // TODO Currently the crawler will abort reading the feed if any discussion fails to load - // this means that a single unreachable file can stop the forward progress of discussion indexing - // to solve this, we need to find a way to tolerate unreachable discussion-files without losing our ability to efficiently detect new discussions - // -prf - if (changedDiscussion.type === 'del') { - // delete - await db.run(` - DELETE FROM crawl_discussions WHERE crawlSourceId = ? AND pathname = ? - `, [crawlSource.id, changedDiscussion.name]) - events.emit('discussion-removed', archive.url) - } else { - // read - let discussionString - try { - discussionString = await archive.pda.readFile(changedDiscussion.name, 'utf8') - } catch (err) { - logger.warn('Failed to read discussion file, aborting', {details: {url: archive.url, name: changedDiscussion.name, err}}) - return // abort indexing - } - - // parse and validate - let discussion - try { - discussion = JSON.parse(discussionString) - let valid = validateDiscussion(discussion) - if (!valid) throw ajv.errorsText(validateDiscussion.errors) - } catch (err) { - logger.warn('Failed to parse discussion file, skipping', {details: {url: archive.url, name: changedDiscussion.name, err}}) - continue // skip - } - - // massage the discussion - discussion.createdAt = Number(new Date(discussion.createdAt)) - discussion.updatedAt = Number(new Date(discussion.updatedAt)) - if (!discussion.title) discussion.title = '' // optional - if (!discussion.href) discussion.href = '' // optional - if (!discussion.tags) discussion.tags = [] // optional - if (isNaN(discussion.updatedAt)) discussion.updatedAt = 0 // optional - - // upsert - let discussionId = 0 - let existingDiscussion = await db.get(knex('crawl_discussions') - .select('id') - .where({ - crawlSourceId: crawlSource.id, - pathname: changedDiscussion.name - }) - ) - if (existingDiscussion) { - let res = await db.run(knex('crawl_discussions') - .where({ - crawlSourceId: crawlSource.id, - pathname: changedDiscussion.name - }).update({ - crawledAt: Date.now(), - title: discussion.title, - body: discussion.body, - href: discussion.href, - createdAt: discussion.createdAt, - updatedAt: discussion.updatedAt, - }) - ) - discussionId = existingDiscussion.id - events.emit('discussion-updated', archive.url) - } else { - let res = await db.run(knex('crawl_discussions') - .insert({ - crawlSourceId: crawlSource.id, - pathname: changedDiscussion.name, - crawledAt: Date.now(), - title: discussion.title, - body: discussion.body, - href: discussion.href, - createdAt: discussion.createdAt, - updatedAt: discussion.updatedAt, - }) - ) - discussionId = +res.lastID - events.emit('discussion-added', archive.url) - } - await db.run(`DELETE FROM crawl_discussions_tags WHERE crawlDiscussionId = ?`, [discussionId]) - for (let tag of discussion.tags) { - await 
db.run(`INSERT OR IGNORE INTO crawl_tags (tag) VALUES (?)`, [tag]) - let tagRow = await db.get(`SELECT id FROM crawl_tags WHERE tag = ?`, [tag]) - await db.run(`INSERT INTO crawl_discussions_tags (crawlDiscussionId, crawlTagId) VALUES (?, ?)`, [discussionId, tagRow.id]) - } - } - - // checkpoint our progress - await doCheckpoint('crawl_discussions', TABLE_VERSION, crawlSource, changedDiscussion.version) - emitProgressEvent(archive.url, 'crawl_discussions', ++progress, changedDiscussions.length) - } - logger.silly(`Finished crawling discussions`, {details: {url: archive.url}}) - }) -} - -/** - * @description - * List crawled discussions. - * - * @param {Object} [opts] - * @param {Object} [opts.filters] - * @param {string|string[]} [opts.filters.authors] - * @param {string|string[]} [opts.filters.tags] - * @param {string} [opts.filters.visibility] - * @param {string} [opts.sortBy] - * @param {number} [opts.offset=0] - * @param {number} [opts.limit] - * @param {boolean} [opts.reverse] - * @returns {Promise>} - */ -exports.list = async function (opts) { - // TODO: handle visibility - // TODO: sortBy options - - // validate & parse params - if (opts && 'sortBy' in opts) assert(typeof opts.sortBy === 'number', 'SortBy must be a string') - if (opts && 'offset' in opts) assert(typeof opts.offset === 'number', 'Offset must be a number') - if (opts && 'limit' in opts) assert(typeof opts.limit === 'number', 'Limit must be a number') - if (opts && 'reverse' in opts) assert(typeof opts.reverse === 'boolean', 'Reverse must be a boolean') - if (opts && opts.filters) { - if ('authors' in opts.filters) { - if (Array.isArray(opts.filters.authors)) { - assert(opts.filters.authors.every(v => typeof v === 'string'), 'Authors filter must be a string or array of strings') - } else { - assert(typeof opts.filters.authors === 'string', 'Authors filter must be a string or array of strings') - opts.filters.authors = [opts.filters.authors] - } - opts.filters.authors = await Promise.all(opts.filters.authors.map(datLibrary.getPrimaryUrl)) - } - if ('tags' in opts.filters) { - if (Array.isArray(opts.filters.tags)) { - assert(opts.filters.tags.every(v => typeof v === 'string'), 'Tags filter must be a string or array of strings') - } else { - assert(typeof opts.filters.tags === 'string', 'Tags filter must be a string or array of strings') - opts.filters.tags = [opts.filters.tags] - } - } - } - - // build query - var sql = knex('crawl_discussions') - .select('crawl_discussions.*') - .select('crawl_sources.url as crawlSourceUrl') - .select(knex.raw('group_concat(crawl_tags.tag, ",") as tags')) - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_discussions.crawlSourceId') - .leftJoin('crawl_discussions_tags', 'crawl_discussions_tags.crawlDiscussionId', '=', 'crawl_discussions.id') - .leftJoin('crawl_tags', 'crawl_discussions_tags.crawlTagId', '=', 'crawl_tags.id') - .groupBy('crawl_discussions.id') - .orderBy('crawl_discussions.createdAt', opts.reverse ? 
'DESC' : 'ASC') - if (opts && opts.filters && opts.filters.authors) { - sql = sql.whereIn('crawl_sources.url', opts.filters.authors) - } - if (opts && opts.limit) sql = sql.limit(opts.limit) - if (opts && opts.offset) sql = sql.offset(opts.offset) - - // execute query - var rows = await db.all(sql) - var discussions = await Promise.all(rows.map(massageDiscussionRow)) - - // apply tags filter - if (opts && opts.filters && opts.filters.tags) { - const someFn = t => opts.filters.tags.includes(t) - discussions = discussions.filter(discussion => discussion.tags.some(someFn)) - } - - return discussions -} - -/** - * @description - * Get crawled discussion. - * - * @param {string} url - The URL of the discussion - * @returns {Promise} - */ -const get = exports.get = async function (url) { - // validate & parse params - var urlParsed - if (url) { - try { urlParsed = new URL(url) } - catch (e) { throw new Error('Invalid URL: ' + url) } - } - - // build query - var sql = knex('crawl_discussions') - .select('crawl_discussions.*') - .select('crawl_sources.url as crawlSourceUrl') - .select(knex.raw('group_concat(crawl_tags.tag, ",") as tags')) - .innerJoin('crawl_sources', function () { - this.on('crawl_sources.id', '=', 'crawl_discussions.crawlSourceId') - .andOn('crawl_sources.url', '=', knex.raw('?', `${urlParsed.protocol}//${urlParsed.hostname}`)) - }) - .leftJoin('crawl_discussions_tags', 'crawl_discussions_tags.crawlDiscussionId', '=', 'crawl_discussions.id') - .leftJoin('crawl_tags', 'crawl_tags.id', '=', 'crawl_discussions_tags.crawlTagId') - .where('crawl_discussions.pathname', urlParsed.pathname) - .groupBy('crawl_discussions.id') - - // execute query - return await massageDiscussionRow(await db.get(sql)) -} - -/** - * @description - * Create a new discussion. - * - * @param {InternalDatArchive} archive - where to write the discussion to. - * @param {Object} discussion - * @param {string} discussion.title - * @param {string} discussion.body - * @param {string} discussion.href - * @param {string[]} discussion.tags - * @param {string} discussion.visibility - * @returns {Promise} url - */ -exports.add = async function (archive, discussion) { - // TODO visibility - - var discussionObject = { - type: JSON_TYPE, - title: discussion.title, - body: discussion.body, - href: discussion.href, - tags: discussion.tags, - createdAt: (new Date()).toISOString() - } - var valid = validateDiscussion(discussionObject) - if (!valid) throw ajv.errorsText(validateDiscussion.errors) - - var filename = generateTimeFilename() - var filepath = `/data/discussions/${filename}.json` - await ensureDirectory(archive, '/data') - await ensureDirectory(archive, '/data/discussions') - await archive.pda.writeFile(filepath, JSON.stringify(discussionObject, null, 2)) - await crawler.crawlSite(archive) - return archive.url + filepath -} - -/** - * @description - * Update the content of an existing discussion. - * - * @param {InternalDatArchive} archive - where to write the discussion to. - * @param {string} pathname - the pathname of the discussion. 
- * @param {Object} discussion - * @param {string} [discussion.title] - * @param {string} [discussion.body] - * @param {string} [discussion.href] - * @param {string[]} [discussion.tags] - * @param {string} [discussion.visibility] - * @returns {Promise} - */ -exports.edit = async function (archive, pathname, discussion) { - // TODO visibility - - var release = await lock('crawler:discussions:' + archive.url) - try { - // fetch discussion - var existingDiscussion = await get(archive.url + pathname) - if (!existingDiscussion) throw new Error('Discussion not found') - - // update discussion content - var discussionObject = { - type: JSON_TYPE, - title: ('title' in discussion) ? discussion.title : existingDiscussion.title, - body: ('body' in discussion) ? discussion.body : existingDiscussion.body, - href: ('href' in discussion) ? discussion.href : existingDiscussion.href, - tags: ('tags' in discussion) ? discussion.tags : existingDiscussion.tags, - createdAt: existingDiscussion.createdAt, - updatedAt: (new Date()).toISOString() - } - - // validate - var valid = validateDiscussion(discussionObject) - if (!valid) throw ajv.errorsText(validateDiscussion.errors) - - // write - await archive.pda.writeFile(pathname, JSON.stringify(discussionObject, null, 2)) - await crawler.crawlSite(archive) - } finally { - release() - } -} - -/** - * @description - * Delete an existing discussion - * - * @param {InternalDatArchive} archive - where to write the discussion to. - * @param {string} pathname - the pathname of the discussion. - * @returns {Promise} - */ -exports.remove = async function (archive, pathname) { - assert(typeof pathname === 'string', 'Remove() must be provided a valid URL string') - await archive.pda.unlink(pathname) - await crawler.crawlSite(archive) -} - -// internal methods -// = - -/** - * @param {Object} row - * @returns {Promise} - */ -async function massageDiscussionRow (row) { - if (!row) return null - var author = await siteDescriptions.getBest({subject: row.crawlSourceUrl}) - if (!author) { - author = { - url: row.crawlSourceUrl, - title: '', - description: '', - type: [], - thumbUrl: `${row.crawlSourceUrl}/thumb`, - descAuthor: {url: null} - } - } - return { - pathname: row.pathname, - author, - title: row.title, - body: row.body, - href: row.href, - tags: row.tags ? row.tags.split(',').filter(Boolean) : [], - createdAt: new Date(row.createdAt).toISOString(), - updatedAt: row.updatedAt ? 
new Date(row.updatedAt).toISOString() : null, - visibility: 'public' // TODO visibility - } -} diff --git a/crawler/json-schemas/discussion.js b/crawler/json-schemas/discussion.js deleted file mode 100644 index ba561eea..00000000 --- a/crawler/json-schemas/discussion.js +++ /dev/null @@ -1,43 +0,0 @@ -module.exports = { - '$schema': 'http://json-schema.org/draft-07/schema#', - '$id': 'dat://unwalled.garden/discussion.json', - 'type': 'object', - 'title': 'Discussion', - 'description': 'A forum discussion.', - 'required': ['type', 'title', 'createdAt'], - 'properties': { - 'type': { - 'type': 'string', - 'description': "The object's type", - 'const': 'unwalled.garden/discussion' - }, - 'title': { - 'type': 'string', - 'maxLength': 280 - }, - 'body': { - 'type': 'string', - 'maxLength': 1000000 - }, - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'tags': { - 'type': 'array', - 'items': { - 'type': 'string', - 'maxLength': 100, - 'pattern': '^[A-Za-z][A-Za-z0-9-_?]*$' - } - }, - 'createdAt': { - 'type': 'string', - 'format': 'date-time' - }, - 'updatedAt': { - 'type': 'string', - 'format': 'date-time' - } - } -} \ No newline at end of file diff --git a/crawler/json-schemas/media.js b/crawler/json-schemas/media.js deleted file mode 100644 index c904f02c..00000000 --- a/crawler/json-schemas/media.js +++ /dev/null @@ -1,52 +0,0 @@ -module.exports = { - '$schema': 'http://json-schema.org/draft-07/schema#', - '$id': 'dat://unwalled.garden/media.json', - 'type': 'object', - 'title': 'media', - 'description': 'A published item of content.', - 'required': [ - 'type', - 'subtype', - 'href', - 'title', - 'createdAt' - ], - 'properties': { - 'type': { - 'type': 'string', - 'const': 'unwalled.garden/media' - }, - 'subtype': { - 'type': 'string' - }, - 'href': { - 'type': 'string', - 'format': 'uri' - }, - 'title': { - 'type': 'string' - }, - 'description': { - 'type': 'string' - }, - 'tags': { - 'type': 'array', - 'items': { - 'type': 'string', - 'maxLength': 100, - 'pattern': '^[A-Za-z][A-Za-z0-9-_?]*$' - } - }, - 'createdAt': { - 'type': 'string', - 'format': 'date-time' - }, - 'updatedAt': { - 'type': 'string', - 'format': 'date-time' - }, - 'ext': { - 'type': 'object' - } - } -} \ No newline at end of file diff --git a/crawler/json-schemas/reaction.js b/crawler/json-schemas/reaction.js deleted file mode 100644 index b27b7cf6..00000000 --- a/crawler/json-schemas/reaction.js +++ /dev/null @@ -1,3873 +0,0 @@ -module.exports = { - '$schema': 'http://json-schema.org/draft-07/schema#', - '$id': 'dat://unwalled.garden/reaction.json', - 'type': 'object', - 'title': 'Reaction', - 'description': 'An emoji annotation on some resource.', - 'required': [ - 'type', - 'topic', - 'emojis' - ], - 'properties': { - 'type': { - 'type': 'string', - 'description': "The object's type", - 'const': 'unwalled.garden/reaction' - }, - 'topic': { - 'type': 'string', - 'description': 'What this reaction is about', - 'format': 'uri', - 'examples': [ - 'dat://beakerbrowser.com' - ] - }, - 'emojis': { - 'type': 'array', - 'description': 'The reaction emojis. 
Must contain supported emojis.', - 'items': { - 'type': 'string', - 'enum': [ - '๐Ÿ˜€', - '๐Ÿ˜ƒ', - '๐Ÿ˜„', - '๐Ÿ˜', - '๐Ÿ˜†', - '๐Ÿ˜…', - '๐Ÿคฃ', - '๐Ÿ˜‚', - '๐Ÿ™‚', - '๐Ÿ™ƒ', - '๐Ÿ˜‰', - '๐Ÿ˜Š', - '๐Ÿ˜‡', - '๐Ÿฅฐ', - '๐Ÿ˜', - '๐Ÿคฉ', - '๐Ÿ˜˜', - '๐Ÿ˜—', - 'โ˜บ๏ธ', - 'โ˜บ', - '๐Ÿ˜š', - '๐Ÿ˜™', - '๐Ÿ˜‹', - '๐Ÿ˜›', - '๐Ÿ˜œ', - '๐Ÿคช', - '๐Ÿ˜', - '๐Ÿค‘', - '๐Ÿค—', - '๐Ÿคญ', - '๐Ÿคซ', - '๐Ÿค”', - '๐Ÿค', - '๐Ÿคจ', - '๐Ÿ˜', - '๐Ÿ˜‘', - '๐Ÿ˜ถ', - '๐Ÿ˜', - '๐Ÿ˜’', - '๐Ÿ™„', - '๐Ÿ˜ฌ', - '๐Ÿคฅ', - '๐Ÿ˜Œ', - '๐Ÿ˜”', - '๐Ÿ˜ช', - '๐Ÿคค', - '๐Ÿ˜ด', - '๐Ÿ˜ท', - '๐Ÿค’', - '๐Ÿค•', - '๐Ÿคข', - '๐Ÿคฎ', - '๐Ÿคง', - '๐Ÿฅต', - '๐Ÿฅถ', - '๐Ÿฅด', - '๐Ÿ˜ต', - '๐Ÿคฏ', - '๐Ÿค ', - '๐Ÿฅณ', - '๐Ÿ˜Ž', - '๐Ÿค“', - '๐Ÿง', - '๐Ÿ˜•', - '๐Ÿ˜Ÿ', - '๐Ÿ™', - 'โ˜น๏ธ', - 'โ˜น', - '๐Ÿ˜ฎ', - '๐Ÿ˜ฏ', - '๐Ÿ˜ฒ', - '๐Ÿ˜ณ', - '๐Ÿฅบ', - '๐Ÿ˜ฆ', - '๐Ÿ˜ง', - '๐Ÿ˜จ', - '๐Ÿ˜ฐ', - '๐Ÿ˜ฅ', - '๐Ÿ˜ข', - '๐Ÿ˜ญ', - '๐Ÿ˜ฑ', - '๐Ÿ˜–', - '๐Ÿ˜ฃ', - '๐Ÿ˜ž', - '๐Ÿ˜“', - '๐Ÿ˜ฉ', - '๐Ÿ˜ซ', - '๐Ÿฅฑ', - '๐Ÿ˜ค', - '๐Ÿ˜ก', - '๐Ÿ˜ ', - '๐Ÿคฌ', - '๐Ÿ˜ˆ', - '๐Ÿ‘ฟ', - '๐Ÿ’€', - 'โ˜ ๏ธ', - 'โ˜ ', - '๐Ÿ’ฉ', - '๐Ÿคก', - '๐Ÿ‘น', - '๐Ÿ‘บ', - '๐Ÿ‘ป', - '๐Ÿ‘ฝ', - '๐Ÿ‘พ', - '๐Ÿค–', - '๐Ÿ˜บ', - '๐Ÿ˜ธ', - '๐Ÿ˜น', - '๐Ÿ˜ป', - '๐Ÿ˜ผ', - '๐Ÿ˜ฝ', - '๐Ÿ™€', - '๐Ÿ˜ฟ', - '๐Ÿ˜พ', - '๐Ÿ™ˆ', - '๐Ÿ™‰', - '๐Ÿ™Š', - '๐Ÿ’‹', - '๐Ÿ’Œ', - '๐Ÿ’˜', - '๐Ÿ’', - '๐Ÿ’–', - '๐Ÿ’—', - '๐Ÿ’“', - '๐Ÿ’ž', - '๐Ÿ’•', - '๐Ÿ’Ÿ', - 'โฃ๏ธ', - 'โฃ', - '๐Ÿ’”', - 'โค๏ธ', - 'โค', - '๐Ÿงก', - '๐Ÿ’›', - '๐Ÿ’š', - '๐Ÿ’™', - '๐Ÿ’œ', - '๐ŸคŽ', - '๐Ÿ–ค', - '๐Ÿค', - '๐Ÿ’ฏ', - '๐Ÿ’ข', - '๐Ÿ’ฅ', - '๐Ÿ’ซ', - '๐Ÿ’ฆ', - '๐Ÿ’จ', - '๐Ÿ•ณ๏ธ', - '๐Ÿ•ณ', - '๐Ÿ’ฃ', - '๐Ÿ’ฌ', - '๐Ÿ‘๏ธโ€๐Ÿ—จ๏ธ', - '๐Ÿ‘โ€๐Ÿ—จ๏ธ', - '๐Ÿ‘๏ธโ€๐Ÿ—จ', - '๐Ÿ‘โ€๐Ÿ—จ', - '๐Ÿ—จ๏ธ', - '๐Ÿ—จ', - '๐Ÿ—ฏ๏ธ', - '๐Ÿ—ฏ', - '๐Ÿ’ญ', - '๐Ÿ’ค', - '๐Ÿ‘‹', - '๐Ÿ‘‹๐Ÿป', - '๐Ÿ‘‹๐Ÿผ', - '๐Ÿ‘‹๐Ÿฝ', - '๐Ÿ‘‹๐Ÿพ', - '๐Ÿ‘‹๐Ÿฟ', - '๐Ÿคš', - '๐Ÿคš๐Ÿป', - '๐Ÿคš๐Ÿผ', - '๐Ÿคš๐Ÿฝ', - '๐Ÿคš๐Ÿพ', - '๐Ÿคš๐Ÿฟ', - '๐Ÿ–๏ธ', - '๐Ÿ–', - '๐Ÿ–๐Ÿป', - '๐Ÿ–๐Ÿผ', - '๐Ÿ–๐Ÿฝ', - '๐Ÿ–๐Ÿพ', - '๐Ÿ–๐Ÿฟ', - 'โœ‹', - 'โœ‹๐Ÿป', - 'โœ‹๐Ÿผ', - 'โœ‹๐Ÿฝ', - 'โœ‹๐Ÿพ', - 'โœ‹๐Ÿฟ', - '๐Ÿ––', - '๐Ÿ––๐Ÿป', - '๐Ÿ––๐Ÿผ', - '๐Ÿ––๐Ÿฝ', - '๐Ÿ––๐Ÿพ', - '๐Ÿ––๐Ÿฟ', - '๐Ÿ‘Œ', - '๐Ÿ‘Œ๐Ÿป', - '๐Ÿ‘Œ๐Ÿผ', - '๐Ÿ‘Œ๐Ÿฝ', - '๐Ÿ‘Œ๐Ÿพ', - '๐Ÿ‘Œ๐Ÿฟ', - '๐Ÿค', - '๐Ÿค๐Ÿป', - '๐Ÿค๐Ÿผ', - '๐Ÿค๐Ÿฝ', - '๐Ÿค๐Ÿพ', - '๐Ÿค๐Ÿฟ', - 'โœŒ๏ธ', - 'โœŒ', - 'โœŒ๐Ÿป', - 'โœŒ๐Ÿผ', - 'โœŒ๐Ÿฝ', - 'โœŒ๐Ÿพ', - 'โœŒ๐Ÿฟ', - '๐Ÿคž', - '๐Ÿคž๐Ÿป', - '๐Ÿคž๐Ÿผ', - '๐Ÿคž๐Ÿฝ', - '๐Ÿคž๐Ÿพ', - '๐Ÿคž๐Ÿฟ', - '๐ŸคŸ', - '๐ŸคŸ๐Ÿป', - '๐ŸคŸ๐Ÿผ', - '๐ŸคŸ๐Ÿฝ', - '๐ŸคŸ๐Ÿพ', - '๐ŸคŸ๐Ÿฟ', - '๐Ÿค˜', - '๐Ÿค˜๐Ÿป', - '๐Ÿค˜๐Ÿผ', - '๐Ÿค˜๐Ÿฝ', - '๐Ÿค˜๐Ÿพ', - '๐Ÿค˜๐Ÿฟ', - '๐Ÿค™', - '๐Ÿค™๐Ÿป', - '๐Ÿค™๐Ÿผ', - '๐Ÿค™๐Ÿฝ', - '๐Ÿค™๐Ÿพ', - '๐Ÿค™๐Ÿฟ', - '๐Ÿ‘ˆ', - '๐Ÿ‘ˆ๐Ÿป', - '๐Ÿ‘ˆ๐Ÿผ', - '๐Ÿ‘ˆ๐Ÿฝ', - '๐Ÿ‘ˆ๐Ÿพ', - '๐Ÿ‘ˆ๐Ÿฟ', - '๐Ÿ‘‰', - '๐Ÿ‘‰๐Ÿป', - '๐Ÿ‘‰๐Ÿผ', - '๐Ÿ‘‰๐Ÿฝ', - '๐Ÿ‘‰๐Ÿพ', - '๐Ÿ‘‰๐Ÿฟ', - '๐Ÿ‘†', - '๐Ÿ‘†๐Ÿป', - '๐Ÿ‘†๐Ÿผ', - '๐Ÿ‘†๐Ÿฝ', - '๐Ÿ‘†๐Ÿพ', - '๐Ÿ‘†๐Ÿฟ', - '๐Ÿ–•', - '๐Ÿ–•๐Ÿป', - '๐Ÿ–•๐Ÿผ', - '๐Ÿ–•๐Ÿฝ', - '๐Ÿ–•๐Ÿพ', - '๐Ÿ–•๐Ÿฟ', - '๐Ÿ‘‡', - '๐Ÿ‘‡๐Ÿป', - '๐Ÿ‘‡๐Ÿผ', - '๐Ÿ‘‡๐Ÿฝ', - '๐Ÿ‘‡๐Ÿพ', - '๐Ÿ‘‡๐Ÿฟ', - 'โ˜๏ธ', - 'โ˜', - 'โ˜๐Ÿป', - 'โ˜๐Ÿผ', - 'โ˜๐Ÿฝ', - 'โ˜๐Ÿพ', - 'โ˜๐Ÿฟ', - '๐Ÿ‘', - '๐Ÿ‘๐Ÿป', - '๐Ÿ‘๐Ÿผ', - '๐Ÿ‘๐Ÿฝ', - '๐Ÿ‘๐Ÿพ', - '๐Ÿ‘๐Ÿฟ', - '๐Ÿ‘Ž', - '๐Ÿ‘Ž๐Ÿป', - '๐Ÿ‘Ž๐Ÿผ', - '๐Ÿ‘Ž๐Ÿฝ', - '๐Ÿ‘Ž๐Ÿพ', - '๐Ÿ‘Ž๐Ÿฟ', - 'โœŠ', - 'โœŠ๐Ÿป', - 'โœŠ๐Ÿผ', - 'โœŠ๐Ÿฝ', - 'โœŠ๐Ÿพ', - 'โœŠ๐Ÿฟ', - '๐Ÿ‘Š', - '๐Ÿ‘Š๐Ÿป', - '๐Ÿ‘Š๐Ÿผ', - '๐Ÿ‘Š๐Ÿฝ', - '๐Ÿ‘Š๐Ÿพ', - '๐Ÿ‘Š๐Ÿฟ', - '๐Ÿค›', - '๐Ÿค›๐Ÿป', - '๐Ÿค›๐Ÿผ', - 
'๐Ÿค›๐Ÿฝ', - '๐Ÿค›๐Ÿพ', - '๐Ÿค›๐Ÿฟ', - '๐Ÿคœ', - '๐Ÿคœ๐Ÿป', - '๐Ÿคœ๐Ÿผ', - '๐Ÿคœ๐Ÿฝ', - '๐Ÿคœ๐Ÿพ', - '๐Ÿคœ๐Ÿฟ', - '๐Ÿ‘', - '๐Ÿ‘๐Ÿป', - '๐Ÿ‘๐Ÿผ', - '๐Ÿ‘๐Ÿฝ', - '๐Ÿ‘๐Ÿพ', - '๐Ÿ‘๐Ÿฟ', - '๐Ÿ™Œ', - '๐Ÿ™Œ๐Ÿป', - '๐Ÿ™Œ๐Ÿผ', - '๐Ÿ™Œ๐Ÿฝ', - '๐Ÿ™Œ๐Ÿพ', - '๐Ÿ™Œ๐Ÿฟ', - '๐Ÿ‘', - '๐Ÿ‘๐Ÿป', - '๐Ÿ‘๐Ÿผ', - '๐Ÿ‘๐Ÿฝ', - '๐Ÿ‘๐Ÿพ', - '๐Ÿ‘๐Ÿฟ', - '๐Ÿคฒ', - '๐Ÿคฒ๐Ÿป', - '๐Ÿคฒ๐Ÿผ', - '๐Ÿคฒ๐Ÿฝ', - '๐Ÿคฒ๐Ÿพ', - '๐Ÿคฒ๐Ÿฟ', - '๐Ÿค', - '๐Ÿ™', - '๐Ÿ™๐Ÿป', - '๐Ÿ™๐Ÿผ', - '๐Ÿ™๐Ÿฝ', - '๐Ÿ™๐Ÿพ', - '๐Ÿ™๐Ÿฟ', - 'โœ๏ธ', - 'โœ', - 'โœ๐Ÿป', - 'โœ๐Ÿผ', - 'โœ๐Ÿฝ', - 'โœ๐Ÿพ', - 'โœ๐Ÿฟ', - '๐Ÿ’…', - '๐Ÿ’…๐Ÿป', - '๐Ÿ’…๐Ÿผ', - '๐Ÿ’…๐Ÿฝ', - '๐Ÿ’…๐Ÿพ', - '๐Ÿ’…๐Ÿฟ', - '๐Ÿคณ', - '๐Ÿคณ๐Ÿป', - '๐Ÿคณ๐Ÿผ', - '๐Ÿคณ๐Ÿฝ', - '๐Ÿคณ๐Ÿพ', - '๐Ÿคณ๐Ÿฟ', - '๐Ÿ’ช', - '๐Ÿ’ช๐Ÿป', - '๐Ÿ’ช๐Ÿผ', - '๐Ÿ’ช๐Ÿฝ', - '๐Ÿ’ช๐Ÿพ', - '๐Ÿ’ช๐Ÿฟ', - '๐Ÿฆพ', - '๐Ÿฆฟ', - '๐Ÿฆต', - '๐Ÿฆต๐Ÿป', - '๐Ÿฆต๐Ÿผ', - '๐Ÿฆต๐Ÿฝ', - '๐Ÿฆต๐Ÿพ', - '๐Ÿฆต๐Ÿฟ', - '๐Ÿฆถ', - '๐Ÿฆถ๐Ÿป', - '๐Ÿฆถ๐Ÿผ', - '๐Ÿฆถ๐Ÿฝ', - '๐Ÿฆถ๐Ÿพ', - '๐Ÿฆถ๐Ÿฟ', - '๐Ÿ‘‚', - '๐Ÿ‘‚๐Ÿป', - '๐Ÿ‘‚๐Ÿผ', - '๐Ÿ‘‚๐Ÿฝ', - '๐Ÿ‘‚๐Ÿพ', - '๐Ÿ‘‚๐Ÿฟ', - '๐Ÿฆป', - '๐Ÿฆป๐Ÿป', - '๐Ÿฆป๐Ÿผ', - '๐Ÿฆป๐Ÿฝ', - '๐Ÿฆป๐Ÿพ', - '๐Ÿฆป๐Ÿฟ', - '๐Ÿ‘ƒ', - '๐Ÿ‘ƒ๐Ÿป', - '๐Ÿ‘ƒ๐Ÿผ', - '๐Ÿ‘ƒ๐Ÿฝ', - '๐Ÿ‘ƒ๐Ÿพ', - '๐Ÿ‘ƒ๐Ÿฟ', - '๐Ÿง ', - '๐Ÿฆท', - '๐Ÿฆด', - '๐Ÿ‘€', - '๐Ÿ‘๏ธ', - '๐Ÿ‘', - '๐Ÿ‘…', - '๐Ÿ‘„', - '๐Ÿ‘ถ', - '๐Ÿ‘ถ๐Ÿป', - '๐Ÿ‘ถ๐Ÿผ', - '๐Ÿ‘ถ๐Ÿฝ', - '๐Ÿ‘ถ๐Ÿพ', - '๐Ÿ‘ถ๐Ÿฟ', - '๐Ÿง’', - '๐Ÿง’๐Ÿป', - '๐Ÿง’๐Ÿผ', - '๐Ÿง’๐Ÿฝ', - '๐Ÿง’๐Ÿพ', - '๐Ÿง’๐Ÿฟ', - '๐Ÿ‘ฆ', - '๐Ÿ‘ฆ๐Ÿป', - '๐Ÿ‘ฆ๐Ÿผ', - '๐Ÿ‘ฆ๐Ÿฝ', - '๐Ÿ‘ฆ๐Ÿพ', - '๐Ÿ‘ฆ๐Ÿฟ', - '๐Ÿ‘ง', - '๐Ÿ‘ง๐Ÿป', - '๐Ÿ‘ง๐Ÿผ', - '๐Ÿ‘ง๐Ÿฝ', - '๐Ÿ‘ง๐Ÿพ', - '๐Ÿ‘ง๐Ÿฟ', - '๐Ÿง‘', - '๐Ÿง‘๐Ÿป', - '๐Ÿง‘๐Ÿผ', - '๐Ÿง‘๐Ÿฝ', - '๐Ÿง‘๐Ÿพ', - '๐Ÿง‘๐Ÿฟ', - '๐Ÿ‘ฑ', - '๐Ÿ‘ฑ๐Ÿป', - '๐Ÿ‘ฑ๐Ÿผ', - '๐Ÿ‘ฑ๐Ÿฝ', - '๐Ÿ‘ฑ๐Ÿพ', - '๐Ÿ‘ฑ๐Ÿฟ', - '๐Ÿ‘จ', - '๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘จ๐Ÿฝ', - '๐Ÿ‘จ๐Ÿพ', - '๐Ÿ‘จ๐Ÿฟ', - '๐Ÿง”', - '๐Ÿง”๐Ÿป', - '๐Ÿง”๐Ÿผ', - '๐Ÿง”๐Ÿฝ', - '๐Ÿง”๐Ÿพ', - '๐Ÿง”๐Ÿฟ', - '๐Ÿ‘ฑโ€โ™‚๏ธ', - '๐Ÿ‘ฑโ€โ™‚', - '๐Ÿ‘ฑ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ‘ฑ๐Ÿปโ€โ™‚', - '๐Ÿ‘ฑ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ‘ฑ๐Ÿผโ€โ™‚', - '๐Ÿ‘ฑ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ‘ฑ๐Ÿฝโ€โ™‚', - '๐Ÿ‘ฑ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ‘ฑ๐Ÿพโ€โ™‚', - '๐Ÿ‘ฑ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ‘ฑ๐Ÿฟโ€โ™‚', - '๐Ÿ‘จโ€๐Ÿฆฐ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿฆฐ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿฆฐ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿฆฐ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿฆฐ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿฆฐ', - '๐Ÿ‘จโ€๐Ÿฆฑ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿฆฑ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿฆฑ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿฆฑ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿฆฑ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿฆฑ', - '๐Ÿ‘จโ€๐Ÿฆณ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿฆณ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿฆณ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿฆณ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿฆณ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿฆณ', - '๐Ÿ‘จโ€๐Ÿฆฒ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿฆฒ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿฆฒ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿฆฒ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿฆฒ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿฆฒ', - '๐Ÿ‘ฉ', - '๐Ÿ‘ฉ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿฝ', - '๐Ÿ‘ฉ๐Ÿพ', - '๐Ÿ‘ฉ๐Ÿฟ', - '๐Ÿ‘ฑโ€โ™€๏ธ', - '๐Ÿ‘ฑโ€โ™€', - '๐Ÿ‘ฑ๐Ÿปโ€โ™€๏ธ', - '๐Ÿ‘ฑ๐Ÿปโ€โ™€', - '๐Ÿ‘ฑ๐Ÿผโ€โ™€๏ธ', - '๐Ÿ‘ฑ๐Ÿผโ€โ™€', - '๐Ÿ‘ฑ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ‘ฑ๐Ÿฝโ€โ™€', - '๐Ÿ‘ฑ๐Ÿพโ€โ™€๏ธ', - '๐Ÿ‘ฑ๐Ÿพโ€โ™€', - '๐Ÿ‘ฑ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ‘ฑ๐Ÿฟโ€โ™€', - '๐Ÿ‘ฉโ€๐Ÿฆฐ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿฆฐ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿฆฐ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿฆฐ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿฆฐ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿฆฐ', - '๐Ÿ‘ฉโ€๐Ÿฆฑ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿฆฑ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿฆฑ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿฆฑ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿฆฑ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿฆฑ', - '๐Ÿ‘ฉโ€๐Ÿฆณ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿฆณ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿฆณ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿฆณ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿฆณ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿฆณ', - '๐Ÿ‘ฉโ€๐Ÿฆฒ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿฆฒ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿฆฒ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿฆฒ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿฆฒ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿฆฒ', 
- '๐Ÿง“', - '๐Ÿง“๐Ÿป', - '๐Ÿง“๐Ÿผ', - '๐Ÿง“๐Ÿฝ', - '๐Ÿง“๐Ÿพ', - '๐Ÿง“๐Ÿฟ', - '๐Ÿ‘ด', - '๐Ÿ‘ด๐Ÿป', - '๐Ÿ‘ด๐Ÿผ', - '๐Ÿ‘ด๐Ÿฝ', - '๐Ÿ‘ด๐Ÿพ', - '๐Ÿ‘ด๐Ÿฟ', - '๐Ÿ‘ต', - '๐Ÿ‘ต๐Ÿป', - '๐Ÿ‘ต๐Ÿผ', - '๐Ÿ‘ต๐Ÿฝ', - '๐Ÿ‘ต๐Ÿพ', - '๐Ÿ‘ต๐Ÿฟ', - '๐Ÿ™', - '๐Ÿ™๐Ÿป', - '๐Ÿ™๐Ÿผ', - '๐Ÿ™๐Ÿฝ', - '๐Ÿ™๐Ÿพ', - '๐Ÿ™๐Ÿฟ', - '๐Ÿ™โ€โ™‚๏ธ', - '๐Ÿ™โ€โ™‚', - '๐Ÿ™๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ™๐Ÿปโ€โ™‚', - '๐Ÿ™๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ™๐Ÿผโ€โ™‚', - '๐Ÿ™๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ™๐Ÿฝโ€โ™‚', - '๐Ÿ™๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ™๐Ÿพโ€โ™‚', - '๐Ÿ™๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ™๐Ÿฟโ€โ™‚', - '๐Ÿ™โ€โ™€๏ธ', - '๐Ÿ™โ€โ™€', - '๐Ÿ™๐Ÿปโ€โ™€๏ธ', - '๐Ÿ™๐Ÿปโ€โ™€', - '๐Ÿ™๐Ÿผโ€โ™€๏ธ', - '๐Ÿ™๐Ÿผโ€โ™€', - '๐Ÿ™๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ™๐Ÿฝโ€โ™€', - '๐Ÿ™๐Ÿพโ€โ™€๏ธ', - '๐Ÿ™๐Ÿพโ€โ™€', - '๐Ÿ™๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ™๐Ÿฟโ€โ™€', - '๐Ÿ™Ž', - '๐Ÿ™Ž๐Ÿป', - '๐Ÿ™Ž๐Ÿผ', - '๐Ÿ™Ž๐Ÿฝ', - '๐Ÿ™Ž๐Ÿพ', - '๐Ÿ™Ž๐Ÿฟ', - '๐Ÿ™Žโ€โ™‚๏ธ', - '๐Ÿ™Žโ€โ™‚', - '๐Ÿ™Ž๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ™Ž๐Ÿปโ€โ™‚', - '๐Ÿ™Ž๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ™Ž๐Ÿผโ€โ™‚', - '๐Ÿ™Ž๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ™Ž๐Ÿฝโ€โ™‚', - '๐Ÿ™Ž๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ™Ž๐Ÿพโ€โ™‚', - '๐Ÿ™Ž๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ™Ž๐Ÿฟโ€โ™‚', - '๐Ÿ™Žโ€โ™€๏ธ', - '๐Ÿ™Žโ€โ™€', - '๐Ÿ™Ž๐Ÿปโ€โ™€๏ธ', - '๐Ÿ™Ž๐Ÿปโ€โ™€', - '๐Ÿ™Ž๐Ÿผโ€โ™€๏ธ', - '๐Ÿ™Ž๐Ÿผโ€โ™€', - '๐Ÿ™Ž๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ™Ž๐Ÿฝโ€โ™€', - '๐Ÿ™Ž๐Ÿพโ€โ™€๏ธ', - '๐Ÿ™Ž๐Ÿพโ€โ™€', - '๐Ÿ™Ž๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ™Ž๐Ÿฟโ€โ™€', - '๐Ÿ™…', - '๐Ÿ™…๐Ÿป', - '๐Ÿ™…๐Ÿผ', - '๐Ÿ™…๐Ÿฝ', - '๐Ÿ™…๐Ÿพ', - '๐Ÿ™…๐Ÿฟ', - '๐Ÿ™…โ€โ™‚๏ธ', - '๐Ÿ™…โ€โ™‚', - '๐Ÿ™…๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ™…๐Ÿปโ€โ™‚', - '๐Ÿ™…๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ™…๐Ÿผโ€โ™‚', - '๐Ÿ™…๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ™…๐Ÿฝโ€โ™‚', - '๐Ÿ™…๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ™…๐Ÿพโ€โ™‚', - '๐Ÿ™…๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ™…๐Ÿฟโ€โ™‚', - '๐Ÿ™…โ€โ™€๏ธ', - '๐Ÿ™…โ€โ™€', - '๐Ÿ™…๐Ÿปโ€โ™€๏ธ', - '๐Ÿ™…๐Ÿปโ€โ™€', - '๐Ÿ™…๐Ÿผโ€โ™€๏ธ', - '๐Ÿ™…๐Ÿผโ€โ™€', - '๐Ÿ™…๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ™…๐Ÿฝโ€โ™€', - '๐Ÿ™…๐Ÿพโ€โ™€๏ธ', - '๐Ÿ™…๐Ÿพโ€โ™€', - '๐Ÿ™…๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ™…๐Ÿฟโ€โ™€', - '๐Ÿ™†', - '๐Ÿ™†๐Ÿป', - '๐Ÿ™†๐Ÿผ', - '๐Ÿ™†๐Ÿฝ', - '๐Ÿ™†๐Ÿพ', - '๐Ÿ™†๐Ÿฟ', - '๐Ÿ™†โ€โ™‚๏ธ', - '๐Ÿ™†โ€โ™‚', - '๐Ÿ™†๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ™†๐Ÿปโ€โ™‚', - '๐Ÿ™†๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ™†๐Ÿผโ€โ™‚', - '๐Ÿ™†๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ™†๐Ÿฝโ€โ™‚', - '๐Ÿ™†๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ™†๐Ÿพโ€โ™‚', - '๐Ÿ™†๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ™†๐Ÿฟโ€โ™‚', - '๐Ÿ™†โ€โ™€๏ธ', - '๐Ÿ™†โ€โ™€', - '๐Ÿ™†๐Ÿปโ€โ™€๏ธ', - '๐Ÿ™†๐Ÿปโ€โ™€', - '๐Ÿ™†๐Ÿผโ€โ™€๏ธ', - '๐Ÿ™†๐Ÿผโ€โ™€', - '๐Ÿ™†๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ™†๐Ÿฝโ€โ™€', - '๐Ÿ™†๐Ÿพโ€โ™€๏ธ', - '๐Ÿ™†๐Ÿพโ€โ™€', - '๐Ÿ™†๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ™†๐Ÿฟโ€โ™€', - '๐Ÿ’', - '๐Ÿ’๐Ÿป', - '๐Ÿ’๐Ÿผ', - '๐Ÿ’๐Ÿฝ', - '๐Ÿ’๐Ÿพ', - '๐Ÿ’๐Ÿฟ', - '๐Ÿ’โ€โ™‚๏ธ', - '๐Ÿ’โ€โ™‚', - '๐Ÿ’๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ’๐Ÿปโ€โ™‚', - '๐Ÿ’๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ’๐Ÿผโ€โ™‚', - '๐Ÿ’๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ’๐Ÿฝโ€โ™‚', - '๐Ÿ’๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ’๐Ÿพโ€โ™‚', - '๐Ÿ’๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ’๐Ÿฟโ€โ™‚', - '๐Ÿ’โ€โ™€๏ธ', - '๐Ÿ’โ€โ™€', - '๐Ÿ’๐Ÿปโ€โ™€๏ธ', - '๐Ÿ’๐Ÿปโ€โ™€', - '๐Ÿ’๐Ÿผโ€โ™€๏ธ', - '๐Ÿ’๐Ÿผโ€โ™€', - '๐Ÿ’๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ’๐Ÿฝโ€โ™€', - '๐Ÿ’๐Ÿพโ€โ™€๏ธ', - '๐Ÿ’๐Ÿพโ€โ™€', - '๐Ÿ’๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ’๐Ÿฟโ€โ™€', - '๐Ÿ™‹', - '๐Ÿ™‹๐Ÿป', - '๐Ÿ™‹๐Ÿผ', - '๐Ÿ™‹๐Ÿฝ', - '๐Ÿ™‹๐Ÿพ', - '๐Ÿ™‹๐Ÿฟ', - '๐Ÿ™‹โ€โ™‚๏ธ', - '๐Ÿ™‹โ€โ™‚', - '๐Ÿ™‹๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ™‹๐Ÿปโ€โ™‚', - '๐Ÿ™‹๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ™‹๐Ÿผโ€โ™‚', - '๐Ÿ™‹๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ™‹๐Ÿฝโ€โ™‚', - '๐Ÿ™‹๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ™‹๐Ÿพโ€โ™‚', - '๐Ÿ™‹๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ™‹๐Ÿฟโ€โ™‚', - '๐Ÿ™‹โ€โ™€๏ธ', - '๐Ÿ™‹โ€โ™€', - '๐Ÿ™‹๐Ÿปโ€โ™€๏ธ', - '๐Ÿ™‹๐Ÿปโ€โ™€', - 
'๐Ÿ™‹๐Ÿผโ€โ™€๏ธ', - '๐Ÿ™‹๐Ÿผโ€โ™€', - '๐Ÿ™‹๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ™‹๐Ÿฝโ€โ™€', - '๐Ÿ™‹๐Ÿพโ€โ™€๏ธ', - '๐Ÿ™‹๐Ÿพโ€โ™€', - '๐Ÿ™‹๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ™‹๐Ÿฟโ€โ™€', - '๐Ÿง', - '๐Ÿง๐Ÿป', - '๐Ÿง๐Ÿผ', - '๐Ÿง๐Ÿฝ', - '๐Ÿง๐Ÿพ', - '๐Ÿง๐Ÿฟ', - '๐Ÿงโ€โ™‚๏ธ', - '๐Ÿงโ€โ™‚', - '๐Ÿง๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง๐Ÿปโ€โ™‚', - '๐Ÿง๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง๐Ÿผโ€โ™‚', - '๐Ÿง๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง๐Ÿฝโ€โ™‚', - '๐Ÿง๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง๐Ÿพโ€โ™‚', - '๐Ÿง๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง๐Ÿฟโ€โ™‚', - '๐Ÿงโ€โ™€๏ธ', - '๐Ÿงโ€โ™€', - '๐Ÿง๐Ÿปโ€โ™€๏ธ', - '๐Ÿง๐Ÿปโ€โ™€', - '๐Ÿง๐Ÿผโ€โ™€๏ธ', - '๐Ÿง๐Ÿผโ€โ™€', - '๐Ÿง๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง๐Ÿฝโ€โ™€', - '๐Ÿง๐Ÿพโ€โ™€๏ธ', - '๐Ÿง๐Ÿพโ€โ™€', - '๐Ÿง๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง๐Ÿฟโ€โ™€', - '๐Ÿ™‡', - '๐Ÿ™‡๐Ÿป', - '๐Ÿ™‡๐Ÿผ', - '๐Ÿ™‡๐Ÿฝ', - '๐Ÿ™‡๐Ÿพ', - '๐Ÿ™‡๐Ÿฟ', - '๐Ÿ™‡โ€โ™‚๏ธ', - '๐Ÿ™‡โ€โ™‚', - '๐Ÿ™‡๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ™‡๐Ÿปโ€โ™‚', - '๐Ÿ™‡๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ™‡๐Ÿผโ€โ™‚', - '๐Ÿ™‡๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ™‡๐Ÿฝโ€โ™‚', - '๐Ÿ™‡๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ™‡๐Ÿพโ€โ™‚', - '๐Ÿ™‡๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ™‡๐Ÿฟโ€โ™‚', - '๐Ÿ™‡โ€โ™€๏ธ', - '๐Ÿ™‡โ€โ™€', - '๐Ÿ™‡๐Ÿปโ€โ™€๏ธ', - '๐Ÿ™‡๐Ÿปโ€โ™€', - '๐Ÿ™‡๐Ÿผโ€โ™€๏ธ', - '๐Ÿ™‡๐Ÿผโ€โ™€', - '๐Ÿ™‡๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ™‡๐Ÿฝโ€โ™€', - '๐Ÿ™‡๐Ÿพโ€โ™€๏ธ', - '๐Ÿ™‡๐Ÿพโ€โ™€', - '๐Ÿ™‡๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ™‡๐Ÿฟโ€โ™€', - '๐Ÿคฆ', - '๐Ÿคฆ๐Ÿป', - '๐Ÿคฆ๐Ÿผ', - '๐Ÿคฆ๐Ÿฝ', - '๐Ÿคฆ๐Ÿพ', - '๐Ÿคฆ๐Ÿฟ', - '๐Ÿคฆโ€โ™‚๏ธ', - '๐Ÿคฆโ€โ™‚', - '๐Ÿคฆ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿคฆ๐Ÿปโ€โ™‚', - '๐Ÿคฆ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿคฆ๐Ÿผโ€โ™‚', - '๐Ÿคฆ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿคฆ๐Ÿฝโ€โ™‚', - '๐Ÿคฆ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿคฆ๐Ÿพโ€โ™‚', - '๐Ÿคฆ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿคฆ๐Ÿฟโ€โ™‚', - '๐Ÿคฆโ€โ™€๏ธ', - '๐Ÿคฆโ€โ™€', - '๐Ÿคฆ๐Ÿปโ€โ™€๏ธ', - '๐Ÿคฆ๐Ÿปโ€โ™€', - '๐Ÿคฆ๐Ÿผโ€โ™€๏ธ', - '๐Ÿคฆ๐Ÿผโ€โ™€', - '๐Ÿคฆ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿคฆ๐Ÿฝโ€โ™€', - '๐Ÿคฆ๐Ÿพโ€โ™€๏ธ', - '๐Ÿคฆ๐Ÿพโ€โ™€', - '๐Ÿคฆ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿคฆ๐Ÿฟโ€โ™€', - '๐Ÿคท', - '๐Ÿคท๐Ÿป', - '๐Ÿคท๐Ÿผ', - '๐Ÿคท๐Ÿฝ', - '๐Ÿคท๐Ÿพ', - '๐Ÿคท๐Ÿฟ', - '๐Ÿคทโ€โ™‚๏ธ', - '๐Ÿคทโ€โ™‚', - '๐Ÿคท๐Ÿปโ€โ™‚๏ธ', - '๐Ÿคท๐Ÿปโ€โ™‚', - '๐Ÿคท๐Ÿผโ€โ™‚๏ธ', - '๐Ÿคท๐Ÿผโ€โ™‚', - '๐Ÿคท๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿคท๐Ÿฝโ€โ™‚', - '๐Ÿคท๐Ÿพโ€โ™‚๏ธ', - '๐Ÿคท๐Ÿพโ€โ™‚', - '๐Ÿคท๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿคท๐Ÿฟโ€โ™‚', - '๐Ÿคทโ€โ™€๏ธ', - '๐Ÿคทโ€โ™€', - '๐Ÿคท๐Ÿปโ€โ™€๏ธ', - '๐Ÿคท๐Ÿปโ€โ™€', - '๐Ÿคท๐Ÿผโ€โ™€๏ธ', - '๐Ÿคท๐Ÿผโ€โ™€', - '๐Ÿคท๐Ÿฝโ€โ™€๏ธ', - '๐Ÿคท๐Ÿฝโ€โ™€', - '๐Ÿคท๐Ÿพโ€โ™€๏ธ', - '๐Ÿคท๐Ÿพโ€โ™€', - '๐Ÿคท๐Ÿฟโ€โ™€๏ธ', - '๐Ÿคท๐Ÿฟโ€โ™€', - '๐Ÿ‘จโ€โš•๏ธ', - '๐Ÿ‘จโ€โš•', - '๐Ÿ‘จ๐Ÿปโ€โš•๏ธ', - '๐Ÿ‘จ๐Ÿปโ€โš•', - '๐Ÿ‘จ๐Ÿผโ€โš•๏ธ', - '๐Ÿ‘จ๐Ÿผโ€โš•', - '๐Ÿ‘จ๐Ÿฝโ€โš•๏ธ', - '๐Ÿ‘จ๐Ÿฝโ€โš•', - '๐Ÿ‘จ๐Ÿพโ€โš•๏ธ', - '๐Ÿ‘จ๐Ÿพโ€โš•', - '๐Ÿ‘จ๐Ÿฟโ€โš•๏ธ', - '๐Ÿ‘จ๐Ÿฟโ€โš•', - '๐Ÿ‘ฉโ€โš•๏ธ', - '๐Ÿ‘ฉโ€โš•', - '๐Ÿ‘ฉ๐Ÿปโ€โš•๏ธ', - '๐Ÿ‘ฉ๐Ÿปโ€โš•', - '๐Ÿ‘ฉ๐Ÿผโ€โš•๏ธ', - '๐Ÿ‘ฉ๐Ÿผโ€โš•', - '๐Ÿ‘ฉ๐Ÿฝโ€โš•๏ธ', - '๐Ÿ‘ฉ๐Ÿฝโ€โš•', - '๐Ÿ‘ฉ๐Ÿพโ€โš•๏ธ', - '๐Ÿ‘ฉ๐Ÿพโ€โš•', - '๐Ÿ‘ฉ๐Ÿฟโ€โš•๏ธ', - '๐Ÿ‘ฉ๐Ÿฟโ€โš•', - '๐Ÿ‘จโ€๐ŸŽ“', - '๐Ÿ‘จ๐Ÿปโ€๐ŸŽ“', - '๐Ÿ‘จ๐Ÿผโ€๐ŸŽ“', - '๐Ÿ‘จ๐Ÿฝโ€๐ŸŽ“', - '๐Ÿ‘จ๐Ÿพโ€๐ŸŽ“', - '๐Ÿ‘จ๐Ÿฟโ€๐ŸŽ“', - '๐Ÿ‘ฉโ€๐ŸŽ“', - '๐Ÿ‘ฉ๐Ÿปโ€๐ŸŽ“', - '๐Ÿ‘ฉ๐Ÿผโ€๐ŸŽ“', - '๐Ÿ‘ฉ๐Ÿฝโ€๐ŸŽ“', - '๐Ÿ‘ฉ๐Ÿพโ€๐ŸŽ“', - '๐Ÿ‘ฉ๐Ÿฟโ€๐ŸŽ“', - '๐Ÿ‘จโ€๐Ÿซ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿซ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿซ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿซ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿซ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿซ', - '๐Ÿ‘ฉโ€๐Ÿซ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿซ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿซ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿซ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿซ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿซ', - '๐Ÿ‘จโ€โš–๏ธ', - '๐Ÿ‘จโ€โš–', - '๐Ÿ‘จ๐Ÿปโ€โš–๏ธ', - 
'๐Ÿ‘จ๐Ÿปโ€โš–', - '๐Ÿ‘จ๐Ÿผโ€โš–๏ธ', - '๐Ÿ‘จ๐Ÿผโ€โš–', - '๐Ÿ‘จ๐Ÿฝโ€โš–๏ธ', - '๐Ÿ‘จ๐Ÿฝโ€โš–', - '๐Ÿ‘จ๐Ÿพโ€โš–๏ธ', - '๐Ÿ‘จ๐Ÿพโ€โš–', - '๐Ÿ‘จ๐Ÿฟโ€โš–๏ธ', - '๐Ÿ‘จ๐Ÿฟโ€โš–', - '๐Ÿ‘ฉโ€โš–๏ธ', - '๐Ÿ‘ฉโ€โš–', - '๐Ÿ‘ฉ๐Ÿปโ€โš–๏ธ', - '๐Ÿ‘ฉ๐Ÿปโ€โš–', - '๐Ÿ‘ฉ๐Ÿผโ€โš–๏ธ', - '๐Ÿ‘ฉ๐Ÿผโ€โš–', - '๐Ÿ‘ฉ๐Ÿฝโ€โš–๏ธ', - '๐Ÿ‘ฉ๐Ÿฝโ€โš–', - '๐Ÿ‘ฉ๐Ÿพโ€โš–๏ธ', - '๐Ÿ‘ฉ๐Ÿพโ€โš–', - '๐Ÿ‘ฉ๐Ÿฟโ€โš–๏ธ', - '๐Ÿ‘ฉ๐Ÿฟโ€โš–', - '๐Ÿ‘จโ€๐ŸŒพ', - '๐Ÿ‘จ๐Ÿปโ€๐ŸŒพ', - '๐Ÿ‘จ๐Ÿผโ€๐ŸŒพ', - '๐Ÿ‘จ๐Ÿฝโ€๐ŸŒพ', - '๐Ÿ‘จ๐Ÿพโ€๐ŸŒพ', - '๐Ÿ‘จ๐Ÿฟโ€๐ŸŒพ', - '๐Ÿ‘ฉโ€๐ŸŒพ', - '๐Ÿ‘ฉ๐Ÿปโ€๐ŸŒพ', - '๐Ÿ‘ฉ๐Ÿผโ€๐ŸŒพ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐ŸŒพ', - '๐Ÿ‘ฉ๐Ÿพโ€๐ŸŒพ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐ŸŒพ', - '๐Ÿ‘จโ€๐Ÿณ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿณ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿณ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿณ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿณ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿณ', - '๐Ÿ‘ฉโ€๐Ÿณ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿณ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿณ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿณ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿณ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿณ', - '๐Ÿ‘จโ€๐Ÿ”ง', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿ”ง', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿ”ง', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿ”ง', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿ”ง', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿ”ง', - '๐Ÿ‘ฉโ€๐Ÿ”ง', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿ”ง', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿ”ง', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿ”ง', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿ”ง', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿ”ง', - '๐Ÿ‘จโ€๐Ÿญ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿญ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿญ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿญ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿญ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿญ', - '๐Ÿ‘ฉโ€๐Ÿญ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿญ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿญ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿญ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿญ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿญ', - '๐Ÿ‘จโ€๐Ÿ’ผ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿ’ผ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿ’ผ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿ’ผ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿ’ผ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿ’ผ', - '๐Ÿ‘ฉโ€๐Ÿ’ผ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿ’ผ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿ’ผ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿ’ผ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿ’ผ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿ’ผ', - '๐Ÿ‘จโ€๐Ÿ”ฌ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿ”ฌ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿ”ฌ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿ”ฌ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿ”ฌ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿ”ฌ', - '๐Ÿ‘ฉโ€๐Ÿ”ฌ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿ”ฌ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿ”ฌ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿ”ฌ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿ”ฌ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿ”ฌ', - '๐Ÿ‘จโ€๐Ÿ’ป', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿ’ป', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿ’ป', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿ’ป', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿ’ป', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿ’ป', - '๐Ÿ‘ฉโ€๐Ÿ’ป', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿ’ป', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿ’ป', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿ’ป', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿ’ป', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿ’ป', - '๐Ÿ‘จโ€๐ŸŽค', - '๐Ÿ‘จ๐Ÿปโ€๐ŸŽค', - '๐Ÿ‘จ๐Ÿผโ€๐ŸŽค', - '๐Ÿ‘จ๐Ÿฝโ€๐ŸŽค', - '๐Ÿ‘จ๐Ÿพโ€๐ŸŽค', - '๐Ÿ‘จ๐Ÿฟโ€๐ŸŽค', - '๐Ÿ‘ฉโ€๐ŸŽค', - '๐Ÿ‘ฉ๐Ÿปโ€๐ŸŽค', - '๐Ÿ‘ฉ๐Ÿผโ€๐ŸŽค', - '๐Ÿ‘ฉ๐Ÿฝโ€๐ŸŽค', - '๐Ÿ‘ฉ๐Ÿพโ€๐ŸŽค', - '๐Ÿ‘ฉ๐Ÿฟโ€๐ŸŽค', - '๐Ÿ‘จโ€๐ŸŽจ', - '๐Ÿ‘จ๐Ÿปโ€๐ŸŽจ', - '๐Ÿ‘จ๐Ÿผโ€๐ŸŽจ', - '๐Ÿ‘จ๐Ÿฝโ€๐ŸŽจ', - '๐Ÿ‘จ๐Ÿพโ€๐ŸŽจ', - '๐Ÿ‘จ๐Ÿฟโ€๐ŸŽจ', - '๐Ÿ‘ฉโ€๐ŸŽจ', - '๐Ÿ‘ฉ๐Ÿปโ€๐ŸŽจ', - '๐Ÿ‘ฉ๐Ÿผโ€๐ŸŽจ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐ŸŽจ', - '๐Ÿ‘ฉ๐Ÿพโ€๐ŸŽจ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐ŸŽจ', - '๐Ÿ‘จโ€โœˆ๏ธ', - '๐Ÿ‘จโ€โœˆ', - '๐Ÿ‘จ๐Ÿปโ€โœˆ๏ธ', - '๐Ÿ‘จ๐Ÿปโ€โœˆ', - '๐Ÿ‘จ๐Ÿผโ€โœˆ๏ธ', - '๐Ÿ‘จ๐Ÿผโ€โœˆ', - '๐Ÿ‘จ๐Ÿฝโ€โœˆ๏ธ', - '๐Ÿ‘จ๐Ÿฝโ€โœˆ', - '๐Ÿ‘จ๐Ÿพโ€โœˆ๏ธ', - '๐Ÿ‘จ๐Ÿพโ€โœˆ', - '๐Ÿ‘จ๐Ÿฟโ€โœˆ๏ธ', - '๐Ÿ‘จ๐Ÿฟโ€โœˆ', - '๐Ÿ‘ฉโ€โœˆ๏ธ', - '๐Ÿ‘ฉโ€โœˆ', - '๐Ÿ‘ฉ๐Ÿปโ€โœˆ๏ธ', - '๐Ÿ‘ฉ๐Ÿปโ€โœˆ', - '๐Ÿ‘ฉ๐Ÿผโ€โœˆ๏ธ', - '๐Ÿ‘ฉ๐Ÿผโ€โœˆ', - '๐Ÿ‘ฉ๐Ÿฝโ€โœˆ๏ธ', - '๐Ÿ‘ฉ๐Ÿฝโ€โœˆ', - '๐Ÿ‘ฉ๐Ÿพโ€โœˆ๏ธ', - '๐Ÿ‘ฉ๐Ÿพโ€โœˆ', - '๐Ÿ‘ฉ๐Ÿฟโ€โœˆ๏ธ', - '๐Ÿ‘ฉ๐Ÿฟโ€โœˆ', - '๐Ÿ‘จโ€๐Ÿš€', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿš€', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿš€', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿš€', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿš€', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿš€', - '๐Ÿ‘ฉโ€๐Ÿš€', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿš€', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿš€', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿš€', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿš€', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿš€', - '๐Ÿ‘จโ€๐Ÿš’', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿš’', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿš’', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿš’', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿš’', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿš’', - '๐Ÿ‘ฉโ€๐Ÿš’', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿš’', - 
'๐Ÿ‘ฉ๐Ÿผโ€๐Ÿš’', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿš’', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿš’', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿš’', - '๐Ÿ‘ฎ', - '๐Ÿ‘ฎ๐Ÿป', - '๐Ÿ‘ฎ๐Ÿผ', - '๐Ÿ‘ฎ๐Ÿฝ', - '๐Ÿ‘ฎ๐Ÿพ', - '๐Ÿ‘ฎ๐Ÿฟ', - '๐Ÿ‘ฎโ€โ™‚๏ธ', - '๐Ÿ‘ฎโ€โ™‚', - '๐Ÿ‘ฎ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ‘ฎ๐Ÿปโ€โ™‚', - '๐Ÿ‘ฎ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ‘ฎ๐Ÿผโ€โ™‚', - '๐Ÿ‘ฎ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ‘ฎ๐Ÿฝโ€โ™‚', - '๐Ÿ‘ฎ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ‘ฎ๐Ÿพโ€โ™‚', - '๐Ÿ‘ฎ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ‘ฎ๐Ÿฟโ€โ™‚', - '๐Ÿ‘ฎโ€โ™€๏ธ', - '๐Ÿ‘ฎโ€โ™€', - '๐Ÿ‘ฎ๐Ÿปโ€โ™€๏ธ', - '๐Ÿ‘ฎ๐Ÿปโ€โ™€', - '๐Ÿ‘ฎ๐Ÿผโ€โ™€๏ธ', - '๐Ÿ‘ฎ๐Ÿผโ€โ™€', - '๐Ÿ‘ฎ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ‘ฎ๐Ÿฝโ€โ™€', - '๐Ÿ‘ฎ๐Ÿพโ€โ™€๏ธ', - '๐Ÿ‘ฎ๐Ÿพโ€โ™€', - '๐Ÿ‘ฎ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ‘ฎ๐Ÿฟโ€โ™€', - '๐Ÿ•ต๏ธ', - '๐Ÿ•ต', - '๐Ÿ•ต๐Ÿป', - '๐Ÿ•ต๐Ÿผ', - '๐Ÿ•ต๐Ÿฝ', - '๐Ÿ•ต๐Ÿพ', - '๐Ÿ•ต๐Ÿฟ', - '๐Ÿ•ต๏ธโ€โ™‚๏ธ', - '๐Ÿ•ตโ€โ™‚๏ธ', - '๐Ÿ•ต๏ธโ€โ™‚', - '๐Ÿ•ตโ€โ™‚', - '๐Ÿ•ต๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ•ต๐Ÿปโ€โ™‚', - '๐Ÿ•ต๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ•ต๐Ÿผโ€โ™‚', - '๐Ÿ•ต๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ•ต๐Ÿฝโ€โ™‚', - '๐Ÿ•ต๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ•ต๐Ÿพโ€โ™‚', - '๐Ÿ•ต๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ•ต๐Ÿฟโ€โ™‚', - '๐Ÿ•ต๏ธโ€โ™€๏ธ', - '๐Ÿ•ตโ€โ™€๏ธ', - '๐Ÿ•ต๏ธโ€โ™€', - '๐Ÿ•ตโ€โ™€', - '๐Ÿ•ต๐Ÿปโ€โ™€๏ธ', - '๐Ÿ•ต๐Ÿปโ€โ™€', - '๐Ÿ•ต๐Ÿผโ€โ™€๏ธ', - '๐Ÿ•ต๐Ÿผโ€โ™€', - '๐Ÿ•ต๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ•ต๐Ÿฝโ€โ™€', - '๐Ÿ•ต๐Ÿพโ€โ™€๏ธ', - '๐Ÿ•ต๐Ÿพโ€โ™€', - '๐Ÿ•ต๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ•ต๐Ÿฟโ€โ™€', - '๐Ÿ’‚', - '๐Ÿ’‚๐Ÿป', - '๐Ÿ’‚๐Ÿผ', - '๐Ÿ’‚๐Ÿฝ', - '๐Ÿ’‚๐Ÿพ', - '๐Ÿ’‚๐Ÿฟ', - '๐Ÿ’‚โ€โ™‚๏ธ', - '๐Ÿ’‚โ€โ™‚', - '๐Ÿ’‚๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ’‚๐Ÿปโ€โ™‚', - '๐Ÿ’‚๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ’‚๐Ÿผโ€โ™‚', - '๐Ÿ’‚๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ’‚๐Ÿฝโ€โ™‚', - '๐Ÿ’‚๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ’‚๐Ÿพโ€โ™‚', - '๐Ÿ’‚๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ’‚๐Ÿฟโ€โ™‚', - '๐Ÿ’‚โ€โ™€๏ธ', - '๐Ÿ’‚โ€โ™€', - '๐Ÿ’‚๐Ÿปโ€โ™€๏ธ', - '๐Ÿ’‚๐Ÿปโ€โ™€', - '๐Ÿ’‚๐Ÿผโ€โ™€๏ธ', - '๐Ÿ’‚๐Ÿผโ€โ™€', - '๐Ÿ’‚๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ’‚๐Ÿฝโ€โ™€', - '๐Ÿ’‚๐Ÿพโ€โ™€๏ธ', - '๐Ÿ’‚๐Ÿพโ€โ™€', - '๐Ÿ’‚๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ’‚๐Ÿฟโ€โ™€', - '๐Ÿ‘ท', - '๐Ÿ‘ท๐Ÿป', - '๐Ÿ‘ท๐Ÿผ', - '๐Ÿ‘ท๐Ÿฝ', - '๐Ÿ‘ท๐Ÿพ', - '๐Ÿ‘ท๐Ÿฟ', - '๐Ÿ‘ทโ€โ™‚๏ธ', - '๐Ÿ‘ทโ€โ™‚', - '๐Ÿ‘ท๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ‘ท๐Ÿปโ€โ™‚', - '๐Ÿ‘ท๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ‘ท๐Ÿผโ€โ™‚', - '๐Ÿ‘ท๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ‘ท๐Ÿฝโ€โ™‚', - '๐Ÿ‘ท๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ‘ท๐Ÿพโ€โ™‚', - '๐Ÿ‘ท๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ‘ท๐Ÿฟโ€โ™‚', - '๐Ÿ‘ทโ€โ™€๏ธ', - '๐Ÿ‘ทโ€โ™€', - '๐Ÿ‘ท๐Ÿปโ€โ™€๏ธ', - '๐Ÿ‘ท๐Ÿปโ€โ™€', - '๐Ÿ‘ท๐Ÿผโ€โ™€๏ธ', - '๐Ÿ‘ท๐Ÿผโ€โ™€', - '๐Ÿ‘ท๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ‘ท๐Ÿฝโ€โ™€', - '๐Ÿ‘ท๐Ÿพโ€โ™€๏ธ', - '๐Ÿ‘ท๐Ÿพโ€โ™€', - '๐Ÿ‘ท๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ‘ท๐Ÿฟโ€โ™€', - '๐Ÿคด', - '๐Ÿคด๐Ÿป', - '๐Ÿคด๐Ÿผ', - '๐Ÿคด๐Ÿฝ', - '๐Ÿคด๐Ÿพ', - '๐Ÿคด๐Ÿฟ', - '๐Ÿ‘ธ', - '๐Ÿ‘ธ๐Ÿป', - '๐Ÿ‘ธ๐Ÿผ', - '๐Ÿ‘ธ๐Ÿฝ', - '๐Ÿ‘ธ๐Ÿพ', - '๐Ÿ‘ธ๐Ÿฟ', - '๐Ÿ‘ณ', - '๐Ÿ‘ณ๐Ÿป', - '๐Ÿ‘ณ๐Ÿผ', - '๐Ÿ‘ณ๐Ÿฝ', - '๐Ÿ‘ณ๐Ÿพ', - '๐Ÿ‘ณ๐Ÿฟ', - '๐Ÿ‘ณโ€โ™‚๏ธ', - '๐Ÿ‘ณโ€โ™‚', - '๐Ÿ‘ณ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ‘ณ๐Ÿปโ€โ™‚', - '๐Ÿ‘ณ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ‘ณ๐Ÿผโ€โ™‚', - '๐Ÿ‘ณ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ‘ณ๐Ÿฝโ€โ™‚', - '๐Ÿ‘ณ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ‘ณ๐Ÿพโ€โ™‚', - '๐Ÿ‘ณ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ‘ณ๐Ÿฟโ€โ™‚', - '๐Ÿ‘ณโ€โ™€๏ธ', - '๐Ÿ‘ณโ€โ™€', - '๐Ÿ‘ณ๐Ÿปโ€โ™€๏ธ', - '๐Ÿ‘ณ๐Ÿปโ€โ™€', - '๐Ÿ‘ณ๐Ÿผโ€โ™€๏ธ', - '๐Ÿ‘ณ๐Ÿผโ€โ™€', - '๐Ÿ‘ณ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ‘ณ๐Ÿฝโ€โ™€', - '๐Ÿ‘ณ๐Ÿพโ€โ™€๏ธ', - '๐Ÿ‘ณ๐Ÿพโ€โ™€', - '๐Ÿ‘ณ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ‘ณ๐Ÿฟโ€โ™€', - '๐Ÿ‘ฒ', - '๐Ÿ‘ฒ๐Ÿป', - '๐Ÿ‘ฒ๐Ÿผ', - '๐Ÿ‘ฒ๐Ÿฝ', - '๐Ÿ‘ฒ๐Ÿพ', - '๐Ÿ‘ฒ๐Ÿฟ', - '๐Ÿง•', - '๐Ÿง•๐Ÿป', - '๐Ÿง•๐Ÿผ', - '๐Ÿง•๐Ÿฝ', - '๐Ÿง•๐Ÿพ', - '๐Ÿง•๐Ÿฟ', - '๐Ÿคต', - '๐Ÿคต๐Ÿป', - '๐Ÿคต๐Ÿผ', - '๐Ÿคต๐Ÿฝ', - '๐Ÿคต๐Ÿพ', - '๐Ÿคต๐Ÿฟ', - '๐Ÿ‘ฐ', - '๐Ÿ‘ฐ๐Ÿป', - '๐Ÿ‘ฐ๐Ÿผ', - '๐Ÿ‘ฐ๐Ÿฝ', - '๐Ÿ‘ฐ๐Ÿพ', - 
'๐Ÿ‘ฐ๐Ÿฟ', - '๐Ÿคฐ', - '๐Ÿคฐ๐Ÿป', - '๐Ÿคฐ๐Ÿผ', - '๐Ÿคฐ๐Ÿฝ', - '๐Ÿคฐ๐Ÿพ', - '๐Ÿคฐ๐Ÿฟ', - '๐Ÿคฑ', - '๐Ÿคฑ๐Ÿป', - '๐Ÿคฑ๐Ÿผ', - '๐Ÿคฑ๐Ÿฝ', - '๐Ÿคฑ๐Ÿพ', - '๐Ÿคฑ๐Ÿฟ', - '๐Ÿ‘ผ', - '๐Ÿ‘ผ๐Ÿป', - '๐Ÿ‘ผ๐Ÿผ', - '๐Ÿ‘ผ๐Ÿฝ', - '๐Ÿ‘ผ๐Ÿพ', - '๐Ÿ‘ผ๐Ÿฟ', - '๐ŸŽ…', - '๐ŸŽ…๐Ÿป', - '๐ŸŽ…๐Ÿผ', - '๐ŸŽ…๐Ÿฝ', - '๐ŸŽ…๐Ÿพ', - '๐ŸŽ…๐Ÿฟ', - '๐Ÿคถ', - '๐Ÿคถ๐Ÿป', - '๐Ÿคถ๐Ÿผ', - '๐Ÿคถ๐Ÿฝ', - '๐Ÿคถ๐Ÿพ', - '๐Ÿคถ๐Ÿฟ', - '๐Ÿฆธ', - '๐Ÿฆธ๐Ÿป', - '๐Ÿฆธ๐Ÿผ', - '๐Ÿฆธ๐Ÿฝ', - '๐Ÿฆธ๐Ÿพ', - '๐Ÿฆธ๐Ÿฟ', - '๐Ÿฆธโ€โ™‚๏ธ', - '๐Ÿฆธโ€โ™‚', - '๐Ÿฆธ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿฆธ๐Ÿปโ€โ™‚', - '๐Ÿฆธ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿฆธ๐Ÿผโ€โ™‚', - '๐Ÿฆธ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿฆธ๐Ÿฝโ€โ™‚', - '๐Ÿฆธ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿฆธ๐Ÿพโ€โ™‚', - '๐Ÿฆธ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿฆธ๐Ÿฟโ€โ™‚', - '๐Ÿฆธโ€โ™€๏ธ', - '๐Ÿฆธโ€โ™€', - '๐Ÿฆธ๐Ÿปโ€โ™€๏ธ', - '๐Ÿฆธ๐Ÿปโ€โ™€', - '๐Ÿฆธ๐Ÿผโ€โ™€๏ธ', - '๐Ÿฆธ๐Ÿผโ€โ™€', - '๐Ÿฆธ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿฆธ๐Ÿฝโ€โ™€', - '๐Ÿฆธ๐Ÿพโ€โ™€๏ธ', - '๐Ÿฆธ๐Ÿพโ€โ™€', - '๐Ÿฆธ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿฆธ๐Ÿฟโ€โ™€', - '๐Ÿฆน', - '๐Ÿฆน๐Ÿป', - '๐Ÿฆน๐Ÿผ', - '๐Ÿฆน๐Ÿฝ', - '๐Ÿฆน๐Ÿพ', - '๐Ÿฆน๐Ÿฟ', - '๐Ÿฆนโ€โ™‚๏ธ', - '๐Ÿฆนโ€โ™‚', - '๐Ÿฆน๐Ÿปโ€โ™‚๏ธ', - '๐Ÿฆน๐Ÿปโ€โ™‚', - '๐Ÿฆน๐Ÿผโ€โ™‚๏ธ', - '๐Ÿฆน๐Ÿผโ€โ™‚', - '๐Ÿฆน๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿฆน๐Ÿฝโ€โ™‚', - '๐Ÿฆน๐Ÿพโ€โ™‚๏ธ', - '๐Ÿฆน๐Ÿพโ€โ™‚', - '๐Ÿฆน๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿฆน๐Ÿฟโ€โ™‚', - '๐Ÿฆนโ€โ™€๏ธ', - '๐Ÿฆนโ€โ™€', - '๐Ÿฆน๐Ÿปโ€โ™€๏ธ', - '๐Ÿฆน๐Ÿปโ€โ™€', - '๐Ÿฆน๐Ÿผโ€โ™€๏ธ', - '๐Ÿฆน๐Ÿผโ€โ™€', - '๐Ÿฆน๐Ÿฝโ€โ™€๏ธ', - '๐Ÿฆน๐Ÿฝโ€โ™€', - '๐Ÿฆน๐Ÿพโ€โ™€๏ธ', - '๐Ÿฆน๐Ÿพโ€โ™€', - '๐Ÿฆน๐Ÿฟโ€โ™€๏ธ', - '๐Ÿฆน๐Ÿฟโ€โ™€', - '๐Ÿง™', - '๐Ÿง™๐Ÿป', - '๐Ÿง™๐Ÿผ', - '๐Ÿง™๐Ÿฝ', - '๐Ÿง™๐Ÿพ', - '๐Ÿง™๐Ÿฟ', - '๐Ÿง™โ€โ™‚๏ธ', - '๐Ÿง™โ€โ™‚', - '๐Ÿง™๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง™๐Ÿปโ€โ™‚', - '๐Ÿง™๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง™๐Ÿผโ€โ™‚', - '๐Ÿง™๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง™๐Ÿฝโ€โ™‚', - '๐Ÿง™๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง™๐Ÿพโ€โ™‚', - '๐Ÿง™๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง™๐Ÿฟโ€โ™‚', - '๐Ÿง™โ€โ™€๏ธ', - '๐Ÿง™โ€โ™€', - '๐Ÿง™๐Ÿปโ€โ™€๏ธ', - '๐Ÿง™๐Ÿปโ€โ™€', - '๐Ÿง™๐Ÿผโ€โ™€๏ธ', - '๐Ÿง™๐Ÿผโ€โ™€', - '๐Ÿง™๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง™๐Ÿฝโ€โ™€', - '๐Ÿง™๐Ÿพโ€โ™€๏ธ', - '๐Ÿง™๐Ÿพโ€โ™€', - '๐Ÿง™๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง™๐Ÿฟโ€โ™€', - '๐Ÿงš', - '๐Ÿงš๐Ÿป', - '๐Ÿงš๐Ÿผ', - '๐Ÿงš๐Ÿฝ', - '๐Ÿงš๐Ÿพ', - '๐Ÿงš๐Ÿฟ', - '๐Ÿงšโ€โ™‚๏ธ', - '๐Ÿงšโ€โ™‚', - '๐Ÿงš๐Ÿปโ€โ™‚๏ธ', - '๐Ÿงš๐Ÿปโ€โ™‚', - '๐Ÿงš๐Ÿผโ€โ™‚๏ธ', - '๐Ÿงš๐Ÿผโ€โ™‚', - '๐Ÿงš๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿงš๐Ÿฝโ€โ™‚', - '๐Ÿงš๐Ÿพโ€โ™‚๏ธ', - '๐Ÿงš๐Ÿพโ€โ™‚', - '๐Ÿงš๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿงš๐Ÿฟโ€โ™‚', - '๐Ÿงšโ€โ™€๏ธ', - '๐Ÿงšโ€โ™€', - '๐Ÿงš๐Ÿปโ€โ™€๏ธ', - '๐Ÿงš๐Ÿปโ€โ™€', - '๐Ÿงš๐Ÿผโ€โ™€๏ธ', - '๐Ÿงš๐Ÿผโ€โ™€', - '๐Ÿงš๐Ÿฝโ€โ™€๏ธ', - '๐Ÿงš๐Ÿฝโ€โ™€', - '๐Ÿงš๐Ÿพโ€โ™€๏ธ', - '๐Ÿงš๐Ÿพโ€โ™€', - '๐Ÿงš๐Ÿฟโ€โ™€๏ธ', - '๐Ÿงš๐Ÿฟโ€โ™€', - '๐Ÿง›', - '๐Ÿง›๐Ÿป', - '๐Ÿง›๐Ÿผ', - '๐Ÿง›๐Ÿฝ', - '๐Ÿง›๐Ÿพ', - '๐Ÿง›๐Ÿฟ', - '๐Ÿง›โ€โ™‚๏ธ', - '๐Ÿง›โ€โ™‚', - '๐Ÿง›๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง›๐Ÿปโ€โ™‚', - '๐Ÿง›๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง›๐Ÿผโ€โ™‚', - '๐Ÿง›๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง›๐Ÿฝโ€โ™‚', - '๐Ÿง›๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง›๐Ÿพโ€โ™‚', - '๐Ÿง›๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง›๐Ÿฟโ€โ™‚', - '๐Ÿง›โ€โ™€๏ธ', - '๐Ÿง›โ€โ™€', - '๐Ÿง›๐Ÿปโ€โ™€๏ธ', - '๐Ÿง›๐Ÿปโ€โ™€', - '๐Ÿง›๐Ÿผโ€โ™€๏ธ', - '๐Ÿง›๐Ÿผโ€โ™€', - '๐Ÿง›๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง›๐Ÿฝโ€โ™€', - '๐Ÿง›๐Ÿพโ€โ™€๏ธ', - '๐Ÿง›๐Ÿพโ€โ™€', - '๐Ÿง›๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง›๐Ÿฟโ€โ™€', - '๐Ÿงœ', - '๐Ÿงœ๐Ÿป', - '๐Ÿงœ๐Ÿผ', - '๐Ÿงœ๐Ÿฝ', - '๐Ÿงœ๐Ÿพ', - '๐Ÿงœ๐Ÿฟ', - '๐Ÿงœโ€โ™‚๏ธ', - '๐Ÿงœโ€โ™‚', - '๐Ÿงœ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿงœ๐Ÿปโ€โ™‚', - '๐Ÿงœ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿงœ๐Ÿผโ€โ™‚', - '๐Ÿงœ๐Ÿฝโ€โ™‚๏ธ', - 
'๐Ÿงœ๐Ÿฝโ€โ™‚', - '๐Ÿงœ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿงœ๐Ÿพโ€โ™‚', - '๐Ÿงœ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿงœ๐Ÿฟโ€โ™‚', - '๐Ÿงœโ€โ™€๏ธ', - '๐Ÿงœโ€โ™€', - '๐Ÿงœ๐Ÿปโ€โ™€๏ธ', - '๐Ÿงœ๐Ÿปโ€โ™€', - '๐Ÿงœ๐Ÿผโ€โ™€๏ธ', - '๐Ÿงœ๐Ÿผโ€โ™€', - '๐Ÿงœ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿงœ๐Ÿฝโ€โ™€', - '๐Ÿงœ๐Ÿพโ€โ™€๏ธ', - '๐Ÿงœ๐Ÿพโ€โ™€', - '๐Ÿงœ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿงœ๐Ÿฟโ€โ™€', - '๐Ÿง', - '๐Ÿง๐Ÿป', - '๐Ÿง๐Ÿผ', - '๐Ÿง๐Ÿฝ', - '๐Ÿง๐Ÿพ', - '๐Ÿง๐Ÿฟ', - '๐Ÿงโ€โ™‚๏ธ', - '๐Ÿงโ€โ™‚', - '๐Ÿง๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง๐Ÿปโ€โ™‚', - '๐Ÿง๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง๐Ÿผโ€โ™‚', - '๐Ÿง๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง๐Ÿฝโ€โ™‚', - '๐Ÿง๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง๐Ÿพโ€โ™‚', - '๐Ÿง๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง๐Ÿฟโ€โ™‚', - '๐Ÿงโ€โ™€๏ธ', - '๐Ÿงโ€โ™€', - '๐Ÿง๐Ÿปโ€โ™€๏ธ', - '๐Ÿง๐Ÿปโ€โ™€', - '๐Ÿง๐Ÿผโ€โ™€๏ธ', - '๐Ÿง๐Ÿผโ€โ™€', - '๐Ÿง๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง๐Ÿฝโ€โ™€', - '๐Ÿง๐Ÿพโ€โ™€๏ธ', - '๐Ÿง๐Ÿพโ€โ™€', - '๐Ÿง๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง๐Ÿฟโ€โ™€', - '๐Ÿงž', - '๐Ÿงžโ€โ™‚๏ธ', - '๐Ÿงžโ€โ™‚', - '๐Ÿงžโ€โ™€๏ธ', - '๐Ÿงžโ€โ™€', - '๐ŸงŸ', - '๐ŸงŸโ€โ™‚๏ธ', - '๐ŸงŸโ€โ™‚', - '๐ŸงŸโ€โ™€๏ธ', - '๐ŸงŸโ€โ™€', - '๐Ÿ’†', - '๐Ÿ’†๐Ÿป', - '๐Ÿ’†๐Ÿผ', - '๐Ÿ’†๐Ÿฝ', - '๐Ÿ’†๐Ÿพ', - '๐Ÿ’†๐Ÿฟ', - '๐Ÿ’†โ€โ™‚๏ธ', - '๐Ÿ’†โ€โ™‚', - '๐Ÿ’†๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ’†๐Ÿปโ€โ™‚', - '๐Ÿ’†๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ’†๐Ÿผโ€โ™‚', - '๐Ÿ’†๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ’†๐Ÿฝโ€โ™‚', - '๐Ÿ’†๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ’†๐Ÿพโ€โ™‚', - '๐Ÿ’†๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ’†๐Ÿฟโ€โ™‚', - '๐Ÿ’†โ€โ™€๏ธ', - '๐Ÿ’†โ€โ™€', - '๐Ÿ’†๐Ÿปโ€โ™€๏ธ', - '๐Ÿ’†๐Ÿปโ€โ™€', - '๐Ÿ’†๐Ÿผโ€โ™€๏ธ', - '๐Ÿ’†๐Ÿผโ€โ™€', - '๐Ÿ’†๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ’†๐Ÿฝโ€โ™€', - '๐Ÿ’†๐Ÿพโ€โ™€๏ธ', - '๐Ÿ’†๐Ÿพโ€โ™€', - '๐Ÿ’†๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ’†๐Ÿฟโ€โ™€', - '๐Ÿ’‡', - '๐Ÿ’‡๐Ÿป', - '๐Ÿ’‡๐Ÿผ', - '๐Ÿ’‡๐Ÿฝ', - '๐Ÿ’‡๐Ÿพ', - '๐Ÿ’‡๐Ÿฟ', - '๐Ÿ’‡โ€โ™‚๏ธ', - '๐Ÿ’‡โ€โ™‚', - '๐Ÿ’‡๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ’‡๐Ÿปโ€โ™‚', - '๐Ÿ’‡๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ’‡๐Ÿผโ€โ™‚', - '๐Ÿ’‡๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ’‡๐Ÿฝโ€โ™‚', - '๐Ÿ’‡๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ’‡๐Ÿพโ€โ™‚', - '๐Ÿ’‡๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ’‡๐Ÿฟโ€โ™‚', - '๐Ÿ’‡โ€โ™€๏ธ', - '๐Ÿ’‡โ€โ™€', - '๐Ÿ’‡๐Ÿปโ€โ™€๏ธ', - '๐Ÿ’‡๐Ÿปโ€โ™€', - '๐Ÿ’‡๐Ÿผโ€โ™€๏ธ', - '๐Ÿ’‡๐Ÿผโ€โ™€', - '๐Ÿ’‡๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ’‡๐Ÿฝโ€โ™€', - '๐Ÿ’‡๐Ÿพโ€โ™€๏ธ', - '๐Ÿ’‡๐Ÿพโ€โ™€', - '๐Ÿ’‡๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ’‡๐Ÿฟโ€โ™€', - '๐Ÿšถ', - '๐Ÿšถ๐Ÿป', - '๐Ÿšถ๐Ÿผ', - '๐Ÿšถ๐Ÿฝ', - '๐Ÿšถ๐Ÿพ', - '๐Ÿšถ๐Ÿฟ', - '๐Ÿšถโ€โ™‚๏ธ', - '๐Ÿšถโ€โ™‚', - '๐Ÿšถ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿšถ๐Ÿปโ€โ™‚', - '๐Ÿšถ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿšถ๐Ÿผโ€โ™‚', - '๐Ÿšถ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿšถ๐Ÿฝโ€โ™‚', - '๐Ÿšถ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿšถ๐Ÿพโ€โ™‚', - '๐Ÿšถ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿšถ๐Ÿฟโ€โ™‚', - '๐Ÿšถโ€โ™€๏ธ', - '๐Ÿšถโ€โ™€', - '๐Ÿšถ๐Ÿปโ€โ™€๏ธ', - '๐Ÿšถ๐Ÿปโ€โ™€', - '๐Ÿšถ๐Ÿผโ€โ™€๏ธ', - '๐Ÿšถ๐Ÿผโ€โ™€', - '๐Ÿšถ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿšถ๐Ÿฝโ€โ™€', - '๐Ÿšถ๐Ÿพโ€โ™€๏ธ', - '๐Ÿšถ๐Ÿพโ€โ™€', - '๐Ÿšถ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿšถ๐Ÿฟโ€โ™€', - '๐Ÿง', - '๐Ÿง๐Ÿป', - '๐Ÿง๐Ÿผ', - '๐Ÿง๐Ÿฝ', - '๐Ÿง๐Ÿพ', - '๐Ÿง๐Ÿฟ', - '๐Ÿงโ€โ™‚๏ธ', - '๐Ÿงโ€โ™‚', - '๐Ÿง๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง๐Ÿปโ€โ™‚', - '๐Ÿง๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง๐Ÿผโ€โ™‚', - '๐Ÿง๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง๐Ÿฝโ€โ™‚', - '๐Ÿง๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง๐Ÿพโ€โ™‚', - '๐Ÿง๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง๐Ÿฟโ€โ™‚', - '๐Ÿงโ€โ™€๏ธ', - '๐Ÿงโ€โ™€', - '๐Ÿง๐Ÿปโ€โ™€๏ธ', - '๐Ÿง๐Ÿปโ€โ™€', - '๐Ÿง๐Ÿผโ€โ™€๏ธ', - '๐Ÿง๐Ÿผโ€โ™€', - '๐Ÿง๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง๐Ÿฝโ€โ™€', - '๐Ÿง๐Ÿพโ€โ™€๏ธ', - '๐Ÿง๐Ÿพโ€โ™€', - '๐Ÿง๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง๐Ÿฟโ€โ™€', - '๐ŸงŽ', - '๐ŸงŽ๐Ÿป', - '๐ŸงŽ๐Ÿผ', - '๐ŸงŽ๐Ÿฝ', - '๐ŸงŽ๐Ÿพ', - '๐ŸงŽ๐Ÿฟ', - '๐ŸงŽโ€โ™‚๏ธ', - '๐ŸงŽโ€โ™‚', - '๐ŸงŽ๐Ÿปโ€โ™‚๏ธ', - 
'๐ŸงŽ๐Ÿปโ€โ™‚', - '๐ŸงŽ๐Ÿผโ€โ™‚๏ธ', - '๐ŸงŽ๐Ÿผโ€โ™‚', - '๐ŸงŽ๐Ÿฝโ€โ™‚๏ธ', - '๐ŸงŽ๐Ÿฝโ€โ™‚', - '๐ŸงŽ๐Ÿพโ€โ™‚๏ธ', - '๐ŸงŽ๐Ÿพโ€โ™‚', - '๐ŸงŽ๐Ÿฟโ€โ™‚๏ธ', - '๐ŸงŽ๐Ÿฟโ€โ™‚', - '๐ŸงŽโ€โ™€๏ธ', - '๐ŸงŽโ€โ™€', - '๐ŸงŽ๐Ÿปโ€โ™€๏ธ', - '๐ŸงŽ๐Ÿปโ€โ™€', - '๐ŸงŽ๐Ÿผโ€โ™€๏ธ', - '๐ŸงŽ๐Ÿผโ€โ™€', - '๐ŸงŽ๐Ÿฝโ€โ™€๏ธ', - '๐ŸงŽ๐Ÿฝโ€โ™€', - '๐ŸงŽ๐Ÿพโ€โ™€๏ธ', - '๐ŸงŽ๐Ÿพโ€โ™€', - '๐ŸงŽ๐Ÿฟโ€โ™€๏ธ', - '๐ŸงŽ๐Ÿฟโ€โ™€', - '๐Ÿ‘จโ€๐Ÿฆฏ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿฆฏ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿฆฏ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿฆฏ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿฆฏ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿฆฏ', - '๐Ÿ‘ฉโ€๐Ÿฆฏ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿฆฏ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿฆฏ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿฆฏ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿฆฏ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿฆฏ', - '๐Ÿ‘จโ€๐Ÿฆผ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿฆผ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿฆผ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿฆผ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿฆผ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿฆผ', - '๐Ÿ‘ฉโ€๐Ÿฆผ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿฆผ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿฆผ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿฆผ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿฆผ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿฆผ', - '๐Ÿ‘จโ€๐Ÿฆฝ', - '๐Ÿ‘จ๐Ÿปโ€๐Ÿฆฝ', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿฆฝ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿฆฝ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿฆฝ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿฆฝ', - '๐Ÿ‘ฉโ€๐Ÿฆฝ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿฆฝ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿฆฝ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿฆฝ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿฆฝ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿฆฝ', - '๐Ÿƒ', - '๐Ÿƒ๐Ÿป', - '๐Ÿƒ๐Ÿผ', - '๐Ÿƒ๐Ÿฝ', - '๐Ÿƒ๐Ÿพ', - '๐Ÿƒ๐Ÿฟ', - '๐Ÿƒโ€โ™‚๏ธ', - '๐Ÿƒโ€โ™‚', - '๐Ÿƒ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿƒ๐Ÿปโ€โ™‚', - '๐Ÿƒ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿƒ๐Ÿผโ€โ™‚', - '๐Ÿƒ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿƒ๐Ÿฝโ€โ™‚', - '๐Ÿƒ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿƒ๐Ÿพโ€โ™‚', - '๐Ÿƒ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿƒ๐Ÿฟโ€โ™‚', - '๐Ÿƒโ€โ™€๏ธ', - '๐Ÿƒโ€โ™€', - '๐Ÿƒ๐Ÿปโ€โ™€๏ธ', - '๐Ÿƒ๐Ÿปโ€โ™€', - '๐Ÿƒ๐Ÿผโ€โ™€๏ธ', - '๐Ÿƒ๐Ÿผโ€โ™€', - '๐Ÿƒ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿƒ๐Ÿฝโ€โ™€', - '๐Ÿƒ๐Ÿพโ€โ™€๏ธ', - '๐Ÿƒ๐Ÿพโ€โ™€', - '๐Ÿƒ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿƒ๐Ÿฟโ€โ™€', - '๐Ÿ’ƒ', - '๐Ÿ’ƒ๐Ÿป', - '๐Ÿ’ƒ๐Ÿผ', - '๐Ÿ’ƒ๐Ÿฝ', - '๐Ÿ’ƒ๐Ÿพ', - '๐Ÿ’ƒ๐Ÿฟ', - '๐Ÿ•บ', - '๐Ÿ•บ๐Ÿป', - '๐Ÿ•บ๐Ÿผ', - '๐Ÿ•บ๐Ÿฝ', - '๐Ÿ•บ๐Ÿพ', - '๐Ÿ•บ๐Ÿฟ', - '๐Ÿ•ด๏ธ', - '๐Ÿ•ด', - '๐Ÿ•ด๐Ÿป', - '๐Ÿ•ด๐Ÿผ', - '๐Ÿ•ด๐Ÿฝ', - '๐Ÿ•ด๐Ÿพ', - '๐Ÿ•ด๐Ÿฟ', - '๐Ÿ‘ฏ', - '๐Ÿ‘ฏโ€โ™‚๏ธ', - '๐Ÿ‘ฏโ€โ™‚', - '๐Ÿ‘ฏโ€โ™€๏ธ', - '๐Ÿ‘ฏโ€โ™€', - '๐Ÿง–', - '๐Ÿง–๐Ÿป', - '๐Ÿง–๐Ÿผ', - '๐Ÿง–๐Ÿฝ', - '๐Ÿง–๐Ÿพ', - '๐Ÿง–๐Ÿฟ', - '๐Ÿง–โ€โ™‚๏ธ', - '๐Ÿง–โ€โ™‚', - '๐Ÿง–๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง–๐Ÿปโ€โ™‚', - '๐Ÿง–๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง–๐Ÿผโ€โ™‚', - '๐Ÿง–๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง–๐Ÿฝโ€โ™‚', - '๐Ÿง–๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง–๐Ÿพโ€โ™‚', - '๐Ÿง–๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง–๐Ÿฟโ€โ™‚', - '๐Ÿง–โ€โ™€๏ธ', - '๐Ÿง–โ€โ™€', - '๐Ÿง–๐Ÿปโ€โ™€๏ธ', - '๐Ÿง–๐Ÿปโ€โ™€', - '๐Ÿง–๐Ÿผโ€โ™€๏ธ', - '๐Ÿง–๐Ÿผโ€โ™€', - '๐Ÿง–๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง–๐Ÿฝโ€โ™€', - '๐Ÿง–๐Ÿพโ€โ™€๏ธ', - '๐Ÿง–๐Ÿพโ€โ™€', - '๐Ÿง–๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง–๐Ÿฟโ€โ™€', - '๐Ÿง—', - '๐Ÿง—๐Ÿป', - '๐Ÿง—๐Ÿผ', - '๐Ÿง—๐Ÿฝ', - '๐Ÿง—๐Ÿพ', - '๐Ÿง—๐Ÿฟ', - '๐Ÿง—โ€โ™‚๏ธ', - '๐Ÿง—โ€โ™‚', - '๐Ÿง—๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง—๐Ÿปโ€โ™‚', - '๐Ÿง—๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง—๐Ÿผโ€โ™‚', - '๐Ÿง—๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง—๐Ÿฝโ€โ™‚', - '๐Ÿง—๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง—๐Ÿพโ€โ™‚', - '๐Ÿง—๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง—๐Ÿฟโ€โ™‚', - '๐Ÿง—โ€โ™€๏ธ', - '๐Ÿง—โ€โ™€', - '๐Ÿง—๐Ÿปโ€โ™€๏ธ', - '๐Ÿง—๐Ÿปโ€โ™€', - '๐Ÿง—๐Ÿผโ€โ™€๏ธ', - '๐Ÿง—๐Ÿผโ€โ™€', - '๐Ÿง—๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง—๐Ÿฝโ€โ™€', - '๐Ÿง—๐Ÿพโ€โ™€๏ธ', - '๐Ÿง—๐Ÿพโ€โ™€', - '๐Ÿง—๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง—๐Ÿฟโ€โ™€', - '๐Ÿคบ', - '๐Ÿ‡', - '๐Ÿ‡๐Ÿป', - '๐Ÿ‡๐Ÿผ', - '๐Ÿ‡๐Ÿฝ', - '๐Ÿ‡๐Ÿพ', - '๐Ÿ‡๐Ÿฟ', - 'โ›ท๏ธ', - 'โ›ท', - '๐Ÿ‚', - '๐Ÿ‚๐Ÿป', - '๐Ÿ‚๐Ÿผ', - '๐Ÿ‚๐Ÿฝ', - '๐Ÿ‚๐Ÿพ', - '๐Ÿ‚๐Ÿฟ', - '๐ŸŒ๏ธ', - '๐ŸŒ', - '๐ŸŒ๐Ÿป', - '๐ŸŒ๐Ÿผ', - '๐ŸŒ๐Ÿฝ', - '๐ŸŒ๐Ÿพ', - '๐ŸŒ๐Ÿฟ', - '๐ŸŒ๏ธโ€โ™‚๏ธ', - 
'๐ŸŒโ€โ™‚๏ธ', - '๐ŸŒ๏ธโ€โ™‚', - '๐ŸŒโ€โ™‚', - '๐ŸŒ๐Ÿปโ€โ™‚๏ธ', - '๐ŸŒ๐Ÿปโ€โ™‚', - '๐ŸŒ๐Ÿผโ€โ™‚๏ธ', - '๐ŸŒ๐Ÿผโ€โ™‚', - '๐ŸŒ๐Ÿฝโ€โ™‚๏ธ', - '๐ŸŒ๐Ÿฝโ€โ™‚', - '๐ŸŒ๐Ÿพโ€โ™‚๏ธ', - '๐ŸŒ๐Ÿพโ€โ™‚', - '๐ŸŒ๐Ÿฟโ€โ™‚๏ธ', - '๐ŸŒ๐Ÿฟโ€โ™‚', - '๐ŸŒ๏ธโ€โ™€๏ธ', - '๐ŸŒโ€โ™€๏ธ', - '๐ŸŒ๏ธโ€โ™€', - '๐ŸŒโ€โ™€', - '๐ŸŒ๐Ÿปโ€โ™€๏ธ', - '๐ŸŒ๐Ÿปโ€โ™€', - '๐ŸŒ๐Ÿผโ€โ™€๏ธ', - '๐ŸŒ๐Ÿผโ€โ™€', - '๐ŸŒ๐Ÿฝโ€โ™€๏ธ', - '๐ŸŒ๐Ÿฝโ€โ™€', - '๐ŸŒ๐Ÿพโ€โ™€๏ธ', - '๐ŸŒ๐Ÿพโ€โ™€', - '๐ŸŒ๐Ÿฟโ€โ™€๏ธ', - '๐ŸŒ๐Ÿฟโ€โ™€', - '๐Ÿ„', - '๐Ÿ„๐Ÿป', - '๐Ÿ„๐Ÿผ', - '๐Ÿ„๐Ÿฝ', - '๐Ÿ„๐Ÿพ', - '๐Ÿ„๐Ÿฟ', - '๐Ÿ„โ€โ™‚๏ธ', - '๐Ÿ„โ€โ™‚', - '๐Ÿ„๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ„๐Ÿปโ€โ™‚', - '๐Ÿ„๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ„๐Ÿผโ€โ™‚', - '๐Ÿ„๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ„๐Ÿฝโ€โ™‚', - '๐Ÿ„๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ„๐Ÿพโ€โ™‚', - '๐Ÿ„๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ„๐Ÿฟโ€โ™‚', - '๐Ÿ„โ€โ™€๏ธ', - '๐Ÿ„โ€โ™€', - '๐Ÿ„๐Ÿปโ€โ™€๏ธ', - '๐Ÿ„๐Ÿปโ€โ™€', - '๐Ÿ„๐Ÿผโ€โ™€๏ธ', - '๐Ÿ„๐Ÿผโ€โ™€', - '๐Ÿ„๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ„๐Ÿฝโ€โ™€', - '๐Ÿ„๐Ÿพโ€โ™€๏ธ', - '๐Ÿ„๐Ÿพโ€โ™€', - '๐Ÿ„๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ„๐Ÿฟโ€โ™€', - '๐Ÿšฃ', - '๐Ÿšฃ๐Ÿป', - '๐Ÿšฃ๐Ÿผ', - '๐Ÿšฃ๐Ÿฝ', - '๐Ÿšฃ๐Ÿพ', - '๐Ÿšฃ๐Ÿฟ', - '๐Ÿšฃโ€โ™‚๏ธ', - '๐Ÿšฃโ€โ™‚', - '๐Ÿšฃ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿšฃ๐Ÿปโ€โ™‚', - '๐Ÿšฃ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿšฃ๐Ÿผโ€โ™‚', - '๐Ÿšฃ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿšฃ๐Ÿฝโ€โ™‚', - '๐Ÿšฃ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿšฃ๐Ÿพโ€โ™‚', - '๐Ÿšฃ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿšฃ๐Ÿฟโ€โ™‚', - '๐Ÿšฃโ€โ™€๏ธ', - '๐Ÿšฃโ€โ™€', - '๐Ÿšฃ๐Ÿปโ€โ™€๏ธ', - '๐Ÿšฃ๐Ÿปโ€โ™€', - '๐Ÿšฃ๐Ÿผโ€โ™€๏ธ', - '๐Ÿšฃ๐Ÿผโ€โ™€', - '๐Ÿšฃ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿšฃ๐Ÿฝโ€โ™€', - '๐Ÿšฃ๐Ÿพโ€โ™€๏ธ', - '๐Ÿšฃ๐Ÿพโ€โ™€', - '๐Ÿšฃ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿšฃ๐Ÿฟโ€โ™€', - '๐ŸŠ', - '๐ŸŠ๐Ÿป', - '๐ŸŠ๐Ÿผ', - '๐ŸŠ๐Ÿฝ', - '๐ŸŠ๐Ÿพ', - '๐ŸŠ๐Ÿฟ', - '๐ŸŠโ€โ™‚๏ธ', - '๐ŸŠโ€โ™‚', - '๐ŸŠ๐Ÿปโ€โ™‚๏ธ', - '๐ŸŠ๐Ÿปโ€โ™‚', - '๐ŸŠ๐Ÿผโ€โ™‚๏ธ', - '๐ŸŠ๐Ÿผโ€โ™‚', - '๐ŸŠ๐Ÿฝโ€โ™‚๏ธ', - '๐ŸŠ๐Ÿฝโ€โ™‚', - '๐ŸŠ๐Ÿพโ€โ™‚๏ธ', - '๐ŸŠ๐Ÿพโ€โ™‚', - '๐ŸŠ๐Ÿฟโ€โ™‚๏ธ', - '๐ŸŠ๐Ÿฟโ€โ™‚', - '๐ŸŠโ€โ™€๏ธ', - '๐ŸŠโ€โ™€', - '๐ŸŠ๐Ÿปโ€โ™€๏ธ', - '๐ŸŠ๐Ÿปโ€โ™€', - '๐ŸŠ๐Ÿผโ€โ™€๏ธ', - '๐ŸŠ๐Ÿผโ€โ™€', - '๐ŸŠ๐Ÿฝโ€โ™€๏ธ', - '๐ŸŠ๐Ÿฝโ€โ™€', - '๐ŸŠ๐Ÿพโ€โ™€๏ธ', - '๐ŸŠ๐Ÿพโ€โ™€', - '๐ŸŠ๐Ÿฟโ€โ™€๏ธ', - '๐ŸŠ๐Ÿฟโ€โ™€', - 'โ›น๏ธ', - 'โ›น', - 'โ›น๐Ÿป', - 'โ›น๐Ÿผ', - 'โ›น๐Ÿฝ', - 'โ›น๐Ÿพ', - 'โ›น๐Ÿฟ', - 'โ›น๏ธโ€โ™‚๏ธ', - 'โ›นโ€โ™‚๏ธ', - 'โ›น๏ธโ€โ™‚', - 'โ›นโ€โ™‚', - 'โ›น๐Ÿปโ€โ™‚๏ธ', - 'โ›น๐Ÿปโ€โ™‚', - 'โ›น๐Ÿผโ€โ™‚๏ธ', - 'โ›น๐Ÿผโ€โ™‚', - 'โ›น๐Ÿฝโ€โ™‚๏ธ', - 'โ›น๐Ÿฝโ€โ™‚', - 'โ›น๐Ÿพโ€โ™‚๏ธ', - 'โ›น๐Ÿพโ€โ™‚', - 'โ›น๐Ÿฟโ€โ™‚๏ธ', - 'โ›น๐Ÿฟโ€โ™‚', - 'โ›น๏ธโ€โ™€๏ธ', - 'โ›นโ€โ™€๏ธ', - 'โ›น๏ธโ€โ™€', - 'โ›นโ€โ™€', - 'โ›น๐Ÿปโ€โ™€๏ธ', - 'โ›น๐Ÿปโ€โ™€', - 'โ›น๐Ÿผโ€โ™€๏ธ', - 'โ›น๐Ÿผโ€โ™€', - 'โ›น๐Ÿฝโ€โ™€๏ธ', - 'โ›น๐Ÿฝโ€โ™€', - 'โ›น๐Ÿพโ€โ™€๏ธ', - 'โ›น๐Ÿพโ€โ™€', - 'โ›น๐Ÿฟโ€โ™€๏ธ', - 'โ›น๐Ÿฟโ€โ™€', - '๐Ÿ‹๏ธ', - '๐Ÿ‹', - '๐Ÿ‹๐Ÿป', - '๐Ÿ‹๐Ÿผ', - '๐Ÿ‹๐Ÿฝ', - '๐Ÿ‹๐Ÿพ', - '๐Ÿ‹๐Ÿฟ', - '๐Ÿ‹๏ธโ€โ™‚๏ธ', - '๐Ÿ‹โ€โ™‚๏ธ', - '๐Ÿ‹๏ธโ€โ™‚', - '๐Ÿ‹โ€โ™‚', - '๐Ÿ‹๐Ÿปโ€โ™‚๏ธ', - '๐Ÿ‹๐Ÿปโ€โ™‚', - '๐Ÿ‹๐Ÿผโ€โ™‚๏ธ', - '๐Ÿ‹๐Ÿผโ€โ™‚', - '๐Ÿ‹๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿ‹๐Ÿฝโ€โ™‚', - '๐Ÿ‹๐Ÿพโ€โ™‚๏ธ', - '๐Ÿ‹๐Ÿพโ€โ™‚', - '๐Ÿ‹๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿ‹๐Ÿฟโ€โ™‚', - '๐Ÿ‹๏ธโ€โ™€๏ธ', - '๐Ÿ‹โ€โ™€๏ธ', - '๐Ÿ‹๏ธโ€โ™€', - '๐Ÿ‹โ€โ™€', - '๐Ÿ‹๐Ÿปโ€โ™€๏ธ', - '๐Ÿ‹๐Ÿปโ€โ™€', - '๐Ÿ‹๐Ÿผโ€โ™€๏ธ', - '๐Ÿ‹๐Ÿผโ€โ™€', - '๐Ÿ‹๐Ÿฝโ€โ™€๏ธ', - '๐Ÿ‹๐Ÿฝโ€โ™€', - '๐Ÿ‹๐Ÿพโ€โ™€๏ธ', - 
'๐Ÿ‹๐Ÿพโ€โ™€', - '๐Ÿ‹๐Ÿฟโ€โ™€๏ธ', - '๐Ÿ‹๐Ÿฟโ€โ™€', - '๐Ÿšด', - '๐Ÿšด๐Ÿป', - '๐Ÿšด๐Ÿผ', - '๐Ÿšด๐Ÿฝ', - '๐Ÿšด๐Ÿพ', - '๐Ÿšด๐Ÿฟ', - '๐Ÿšดโ€โ™‚๏ธ', - '๐Ÿšดโ€โ™‚', - '๐Ÿšด๐Ÿปโ€โ™‚๏ธ', - '๐Ÿšด๐Ÿปโ€โ™‚', - '๐Ÿšด๐Ÿผโ€โ™‚๏ธ', - '๐Ÿšด๐Ÿผโ€โ™‚', - '๐Ÿšด๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿšด๐Ÿฝโ€โ™‚', - '๐Ÿšด๐Ÿพโ€โ™‚๏ธ', - '๐Ÿšด๐Ÿพโ€โ™‚', - '๐Ÿšด๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿšด๐Ÿฟโ€โ™‚', - '๐Ÿšดโ€โ™€๏ธ', - '๐Ÿšดโ€โ™€', - '๐Ÿšด๐Ÿปโ€โ™€๏ธ', - '๐Ÿšด๐Ÿปโ€โ™€', - '๐Ÿšด๐Ÿผโ€โ™€๏ธ', - '๐Ÿšด๐Ÿผโ€โ™€', - '๐Ÿšด๐Ÿฝโ€โ™€๏ธ', - '๐Ÿšด๐Ÿฝโ€โ™€', - '๐Ÿšด๐Ÿพโ€โ™€๏ธ', - '๐Ÿšด๐Ÿพโ€โ™€', - '๐Ÿšด๐Ÿฟโ€โ™€๏ธ', - '๐Ÿšด๐Ÿฟโ€โ™€', - '๐Ÿšต', - '๐Ÿšต๐Ÿป', - '๐Ÿšต๐Ÿผ', - '๐Ÿšต๐Ÿฝ', - '๐Ÿšต๐Ÿพ', - '๐Ÿšต๐Ÿฟ', - '๐Ÿšตโ€โ™‚๏ธ', - '๐Ÿšตโ€โ™‚', - '๐Ÿšต๐Ÿปโ€โ™‚๏ธ', - '๐Ÿšต๐Ÿปโ€โ™‚', - '๐Ÿšต๐Ÿผโ€โ™‚๏ธ', - '๐Ÿšต๐Ÿผโ€โ™‚', - '๐Ÿšต๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿšต๐Ÿฝโ€โ™‚', - '๐Ÿšต๐Ÿพโ€โ™‚๏ธ', - '๐Ÿšต๐Ÿพโ€โ™‚', - '๐Ÿšต๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿšต๐Ÿฟโ€โ™‚', - '๐Ÿšตโ€โ™€๏ธ', - '๐Ÿšตโ€โ™€', - '๐Ÿšต๐Ÿปโ€โ™€๏ธ', - '๐Ÿšต๐Ÿปโ€โ™€', - '๐Ÿšต๐Ÿผโ€โ™€๏ธ', - '๐Ÿšต๐Ÿผโ€โ™€', - '๐Ÿšต๐Ÿฝโ€โ™€๏ธ', - '๐Ÿšต๐Ÿฝโ€โ™€', - '๐Ÿšต๐Ÿพโ€โ™€๏ธ', - '๐Ÿšต๐Ÿพโ€โ™€', - '๐Ÿšต๐Ÿฟโ€โ™€๏ธ', - '๐Ÿšต๐Ÿฟโ€โ™€', - '๐Ÿคธ', - '๐Ÿคธ๐Ÿป', - '๐Ÿคธ๐Ÿผ', - '๐Ÿคธ๐Ÿฝ', - '๐Ÿคธ๐Ÿพ', - '๐Ÿคธ๐Ÿฟ', - '๐Ÿคธโ€โ™‚๏ธ', - '๐Ÿคธโ€โ™‚', - '๐Ÿคธ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿคธ๐Ÿปโ€โ™‚', - '๐Ÿคธ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿคธ๐Ÿผโ€โ™‚', - '๐Ÿคธ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿคธ๐Ÿฝโ€โ™‚', - '๐Ÿคธ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿคธ๐Ÿพโ€โ™‚', - '๐Ÿคธ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿคธ๐Ÿฟโ€โ™‚', - '๐Ÿคธโ€โ™€๏ธ', - '๐Ÿคธโ€โ™€', - '๐Ÿคธ๐Ÿปโ€โ™€๏ธ', - '๐Ÿคธ๐Ÿปโ€โ™€', - '๐Ÿคธ๐Ÿผโ€โ™€๏ธ', - '๐Ÿคธ๐Ÿผโ€โ™€', - '๐Ÿคธ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿคธ๐Ÿฝโ€โ™€', - '๐Ÿคธ๐Ÿพโ€โ™€๏ธ', - '๐Ÿคธ๐Ÿพโ€โ™€', - '๐Ÿคธ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿคธ๐Ÿฟโ€โ™€', - '๐Ÿคผ', - '๐Ÿคผโ€โ™‚๏ธ', - '๐Ÿคผโ€โ™‚', - '๐Ÿคผโ€โ™€๏ธ', - '๐Ÿคผโ€โ™€', - '๐Ÿคฝ', - '๐Ÿคฝ๐Ÿป', - '๐Ÿคฝ๐Ÿผ', - '๐Ÿคฝ๐Ÿฝ', - '๐Ÿคฝ๐Ÿพ', - '๐Ÿคฝ๐Ÿฟ', - '๐Ÿคฝโ€โ™‚๏ธ', - '๐Ÿคฝโ€โ™‚', - '๐Ÿคฝ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿคฝ๐Ÿปโ€โ™‚', - '๐Ÿคฝ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿคฝ๐Ÿผโ€โ™‚', - '๐Ÿคฝ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿคฝ๐Ÿฝโ€โ™‚', - '๐Ÿคฝ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿคฝ๐Ÿพโ€โ™‚', - '๐Ÿคฝ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿคฝ๐Ÿฟโ€โ™‚', - '๐Ÿคฝโ€โ™€๏ธ', - '๐Ÿคฝโ€โ™€', - '๐Ÿคฝ๐Ÿปโ€โ™€๏ธ', - '๐Ÿคฝ๐Ÿปโ€โ™€', - '๐Ÿคฝ๐Ÿผโ€โ™€๏ธ', - '๐Ÿคฝ๐Ÿผโ€โ™€', - '๐Ÿคฝ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿคฝ๐Ÿฝโ€โ™€', - '๐Ÿคฝ๐Ÿพโ€โ™€๏ธ', - '๐Ÿคฝ๐Ÿพโ€โ™€', - '๐Ÿคฝ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿคฝ๐Ÿฟโ€โ™€', - '๐Ÿคพ', - '๐Ÿคพ๐Ÿป', - '๐Ÿคพ๐Ÿผ', - '๐Ÿคพ๐Ÿฝ', - '๐Ÿคพ๐Ÿพ', - '๐Ÿคพ๐Ÿฟ', - '๐Ÿคพโ€โ™‚๏ธ', - '๐Ÿคพโ€โ™‚', - '๐Ÿคพ๐Ÿปโ€โ™‚๏ธ', - '๐Ÿคพ๐Ÿปโ€โ™‚', - '๐Ÿคพ๐Ÿผโ€โ™‚๏ธ', - '๐Ÿคพ๐Ÿผโ€โ™‚', - '๐Ÿคพ๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿคพ๐Ÿฝโ€โ™‚', - '๐Ÿคพ๐Ÿพโ€โ™‚๏ธ', - '๐Ÿคพ๐Ÿพโ€โ™‚', - '๐Ÿคพ๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿคพ๐Ÿฟโ€โ™‚', - '๐Ÿคพโ€โ™€๏ธ', - '๐Ÿคพโ€โ™€', - '๐Ÿคพ๐Ÿปโ€โ™€๏ธ', - '๐Ÿคพ๐Ÿปโ€โ™€', - '๐Ÿคพ๐Ÿผโ€โ™€๏ธ', - '๐Ÿคพ๐Ÿผโ€โ™€', - '๐Ÿคพ๐Ÿฝโ€โ™€๏ธ', - '๐Ÿคพ๐Ÿฝโ€โ™€', - '๐Ÿคพ๐Ÿพโ€โ™€๏ธ', - '๐Ÿคพ๐Ÿพโ€โ™€', - '๐Ÿคพ๐Ÿฟโ€โ™€๏ธ', - '๐Ÿคพ๐Ÿฟโ€โ™€', - '๐Ÿคน', - '๐Ÿคน๐Ÿป', - '๐Ÿคน๐Ÿผ', - '๐Ÿคน๐Ÿฝ', - '๐Ÿคน๐Ÿพ', - '๐Ÿคน๐Ÿฟ', - '๐Ÿคนโ€โ™‚๏ธ', - '๐Ÿคนโ€โ™‚', - '๐Ÿคน๐Ÿปโ€โ™‚๏ธ', - '๐Ÿคน๐Ÿปโ€โ™‚', - '๐Ÿคน๐Ÿผโ€โ™‚๏ธ', - '๐Ÿคน๐Ÿผโ€โ™‚', - '๐Ÿคน๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿคน๐Ÿฝโ€โ™‚', - '๐Ÿคน๐Ÿพโ€โ™‚๏ธ', - '๐Ÿคน๐Ÿพโ€โ™‚', - '๐Ÿคน๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿคน๐Ÿฟโ€โ™‚', - '๐Ÿคนโ€โ™€๏ธ', - '๐Ÿคนโ€โ™€', - '๐Ÿคน๐Ÿปโ€โ™€๏ธ', - '๐Ÿคน๐Ÿปโ€โ™€', - '๐Ÿคน๐Ÿผโ€โ™€๏ธ', - '๐Ÿคน๐Ÿผโ€โ™€', - '๐Ÿคน๐Ÿฝโ€โ™€๏ธ', - '๐Ÿคน๐Ÿฝโ€โ™€', - 
'๐Ÿคน๐Ÿพโ€โ™€๏ธ', - '๐Ÿคน๐Ÿพโ€โ™€', - '๐Ÿคน๐Ÿฟโ€โ™€๏ธ', - '๐Ÿคน๐Ÿฟโ€โ™€', - '๐Ÿง˜', - '๐Ÿง˜๐Ÿป', - '๐Ÿง˜๐Ÿผ', - '๐Ÿง˜๐Ÿฝ', - '๐Ÿง˜๐Ÿพ', - '๐Ÿง˜๐Ÿฟ', - '๐Ÿง˜โ€โ™‚๏ธ', - '๐Ÿง˜โ€โ™‚', - '๐Ÿง˜๐Ÿปโ€โ™‚๏ธ', - '๐Ÿง˜๐Ÿปโ€โ™‚', - '๐Ÿง˜๐Ÿผโ€โ™‚๏ธ', - '๐Ÿง˜๐Ÿผโ€โ™‚', - '๐Ÿง˜๐Ÿฝโ€โ™‚๏ธ', - '๐Ÿง˜๐Ÿฝโ€โ™‚', - '๐Ÿง˜๐Ÿพโ€โ™‚๏ธ', - '๐Ÿง˜๐Ÿพโ€โ™‚', - '๐Ÿง˜๐Ÿฟโ€โ™‚๏ธ', - '๐Ÿง˜๐Ÿฟโ€โ™‚', - '๐Ÿง˜โ€โ™€๏ธ', - '๐Ÿง˜โ€โ™€', - '๐Ÿง˜๐Ÿปโ€โ™€๏ธ', - '๐Ÿง˜๐Ÿปโ€โ™€', - '๐Ÿง˜๐Ÿผโ€โ™€๏ธ', - '๐Ÿง˜๐Ÿผโ€โ™€', - '๐Ÿง˜๐Ÿฝโ€โ™€๏ธ', - '๐Ÿง˜๐Ÿฝโ€โ™€', - '๐Ÿง˜๐Ÿพโ€โ™€๏ธ', - '๐Ÿง˜๐Ÿพโ€โ™€', - '๐Ÿง˜๐Ÿฟโ€โ™€๏ธ', - '๐Ÿง˜๐Ÿฟโ€โ™€', - '๐Ÿ›€', - '๐Ÿ›€๐Ÿป', - '๐Ÿ›€๐Ÿผ', - '๐Ÿ›€๐Ÿฝ', - '๐Ÿ›€๐Ÿพ', - '๐Ÿ›€๐Ÿฟ', - '๐Ÿ›Œ', - '๐Ÿ›Œ๐Ÿป', - '๐Ÿ›Œ๐Ÿผ', - '๐Ÿ›Œ๐Ÿฝ', - '๐Ÿ›Œ๐Ÿพ', - '๐Ÿ›Œ๐Ÿฟ', - '๐Ÿง‘โ€๐Ÿคโ€๐Ÿง‘', - '๐Ÿง‘๐Ÿปโ€๐Ÿคโ€๐Ÿง‘๐Ÿป', - '๐Ÿง‘๐Ÿผโ€๐Ÿคโ€๐Ÿง‘๐Ÿป', - '๐Ÿง‘๐Ÿผโ€๐Ÿคโ€๐Ÿง‘๐Ÿผ', - '๐Ÿง‘๐Ÿฝโ€๐Ÿคโ€๐Ÿง‘๐Ÿป', - '๐Ÿง‘๐Ÿฝโ€๐Ÿคโ€๐Ÿง‘๐Ÿผ', - '๐Ÿง‘๐Ÿฝโ€๐Ÿคโ€๐Ÿง‘๐Ÿฝ', - '๐Ÿง‘๐Ÿพโ€๐Ÿคโ€๐Ÿง‘๐Ÿป', - '๐Ÿง‘๐Ÿพโ€๐Ÿคโ€๐Ÿง‘๐Ÿผ', - '๐Ÿง‘๐Ÿพโ€๐Ÿคโ€๐Ÿง‘๐Ÿฝ', - '๐Ÿง‘๐Ÿพโ€๐Ÿคโ€๐Ÿง‘๐Ÿพ', - '๐Ÿง‘๐Ÿฟโ€๐Ÿคโ€๐Ÿง‘๐Ÿป', - '๐Ÿง‘๐Ÿฟโ€๐Ÿคโ€๐Ÿง‘๐Ÿผ', - '๐Ÿง‘๐Ÿฟโ€๐Ÿคโ€๐Ÿง‘๐Ÿฝ', - '๐Ÿง‘๐Ÿฟโ€๐Ÿคโ€๐Ÿง‘๐Ÿพ', - '๐Ÿง‘๐Ÿฟโ€๐Ÿคโ€๐Ÿง‘๐Ÿฟ', - '๐Ÿ‘ญ', - '๐Ÿ‘ญ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿป', - '๐Ÿ‘ญ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿผ', - '๐Ÿ‘ญ๐Ÿฝ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿฝ', - '๐Ÿ‘ญ๐Ÿพ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿฝ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘ฉ๐Ÿพ', - '๐Ÿ‘ญ๐Ÿฟ', - '๐Ÿ‘ซ', - '๐Ÿ‘ซ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฝ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿพ', - '๐Ÿ‘ฉ๐Ÿปโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฟ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘ซ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฝ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿพ', - '๐Ÿ‘ฉ๐Ÿผโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฟ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘ซ๐Ÿฝ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿพ', - '๐Ÿ‘ฉ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฟ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฝ', - '๐Ÿ‘ซ๐Ÿพ', - '๐Ÿ‘ฉ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฟ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฝ', - '๐Ÿ‘ฉ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿพ', - '๐Ÿ‘ซ๐Ÿฟ', - '๐Ÿ‘ฌ', - '๐Ÿ‘ฌ๐Ÿป', - '๐Ÿ‘จ๐Ÿผโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘ฌ๐Ÿผ', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘จ๐Ÿฝโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘ฌ๐Ÿฝ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘จ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฝ', - '๐Ÿ‘ฌ๐Ÿพ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿป', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿฝ', - '๐Ÿ‘จ๐Ÿฟโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿพ', - '๐Ÿ‘ฌ๐Ÿฟ', - '๐Ÿ’', - '๐Ÿ‘ฉโ€โค๏ธโ€๐Ÿ’‹โ€๐Ÿ‘จ', - '๐Ÿ‘ฉโ€โคโ€๐Ÿ’‹โ€๐Ÿ‘จ', - '๐Ÿ‘จโ€โค๏ธโ€๐Ÿ’‹โ€๐Ÿ‘จ', - '๐Ÿ‘จโ€โคโ€๐Ÿ’‹โ€๐Ÿ‘จ', - '๐Ÿ‘ฉโ€โค๏ธโ€๐Ÿ’‹โ€๐Ÿ‘ฉ', - '๐Ÿ‘ฉโ€โคโ€๐Ÿ’‹โ€๐Ÿ‘ฉ', - '๐Ÿ’‘', - '๐Ÿ‘ฉโ€โค๏ธโ€๐Ÿ‘จ', - '๐Ÿ‘ฉโ€โคโ€๐Ÿ‘จ', - '๐Ÿ‘จโ€โค๏ธโ€๐Ÿ‘จ', - '๐Ÿ‘จโ€โคโ€๐Ÿ‘จ', - '๐Ÿ‘ฉโ€โค๏ธโ€๐Ÿ‘ฉ', - '๐Ÿ‘ฉโ€โคโ€๐Ÿ‘ฉ', - '๐Ÿ‘ช', - '๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ง', - '๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง', - '๐Ÿ‘จโ€๐Ÿ‘จโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘จโ€๐Ÿ‘ง', - '๐Ÿ‘จโ€๐Ÿ‘จโ€๐Ÿ‘งโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘จโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘จโ€๐Ÿ‘งโ€๐Ÿ‘ง', - '๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆ', - '๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘ง', - 
'๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ฆ', - '๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ', - '๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง', - '๐Ÿ‘จโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘ง', - '๐Ÿ‘จโ€๐Ÿ‘งโ€๐Ÿ‘ฆ', - '๐Ÿ‘จโ€๐Ÿ‘งโ€๐Ÿ‘ง', - '๐Ÿ‘ฉโ€๐Ÿ‘ฆ', - '๐Ÿ‘ฉโ€๐Ÿ‘ฆโ€๐Ÿ‘ฆ', - '๐Ÿ‘ฉโ€๐Ÿ‘ง', - '๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ฆ', - '๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง', - '๐Ÿ—ฃ๏ธ', - '๐Ÿ—ฃ', - '๐Ÿ‘ค', - '๐Ÿ‘ฅ', - '๐Ÿ‘ฃ', - '๐Ÿป', - '๐Ÿผ', - '๐Ÿฝ', - '๐Ÿพ', - '๐Ÿฟ', - '๐Ÿฆฐ', - '๐Ÿฆฑ', - '๐Ÿฆณ', - '๐Ÿฆฒ', - '๐Ÿต', - '๐Ÿ’', - '๐Ÿฆ', - '๐Ÿฆง', - '๐Ÿถ', - '๐Ÿ•', - '๐Ÿฆฎ', - '๐Ÿ•โ€๐Ÿฆบ', - '๐Ÿฉ', - '๐Ÿบ', - '๐ŸฆŠ', - '๐Ÿฆ', - '๐Ÿฑ', - '๐Ÿˆ', - '๐Ÿฆ', - '๐Ÿฏ', - '๐Ÿ…', - '๐Ÿ†', - '๐Ÿด', - '๐ŸŽ', - '๐Ÿฆ„', - '๐Ÿฆ“', - '๐ŸฆŒ', - '๐Ÿฎ', - '๐Ÿ‚', - '๐Ÿƒ', - '๐Ÿ„', - '๐Ÿท', - '๐Ÿ–', - '๐Ÿ—', - '๐Ÿฝ', - '๐Ÿ', - '๐Ÿ‘', - '๐Ÿ', - '๐Ÿช', - '๐Ÿซ', - '๐Ÿฆ™', - '๐Ÿฆ’', - '๐Ÿ˜', - '๐Ÿฆ', - '๐Ÿฆ›', - '๐Ÿญ', - '๐Ÿ', - '๐Ÿ€', - '๐Ÿน', - '๐Ÿฐ', - '๐Ÿ‡', - '๐Ÿฟ๏ธ', - '๐Ÿฟ', - '๐Ÿฆ”', - '๐Ÿฆ‡', - '๐Ÿป', - '๐Ÿจ', - '๐Ÿผ', - '๐Ÿฆฅ', - '๐Ÿฆฆ', - '๐Ÿฆจ', - '๐Ÿฆ˜', - '๐Ÿฆก', - '๐Ÿพ', - '๐Ÿฆƒ', - '๐Ÿ”', - '๐Ÿ“', - '๐Ÿฃ', - '๐Ÿค', - '๐Ÿฅ', - '๐Ÿฆ', - '๐Ÿง', - '๐Ÿ•Š๏ธ', - '๐Ÿ•Š', - '๐Ÿฆ…', - '๐Ÿฆ†', - '๐Ÿฆข', - '๐Ÿฆ‰', - '๐Ÿฆฉ', - '๐Ÿฆš', - '๐Ÿฆœ', - '๐Ÿธ', - '๐ŸŠ', - '๐Ÿข', - '๐ŸฆŽ', - '๐Ÿ', - '๐Ÿฒ', - '๐Ÿ‰', - '๐Ÿฆ•', - '๐Ÿฆ–', - '๐Ÿณ', - '๐Ÿ‹', - '๐Ÿฌ', - '๐ŸŸ', - '๐Ÿ ', - '๐Ÿก', - '๐Ÿฆˆ', - '๐Ÿ™', - '๐Ÿš', - '๐ŸŒ', - '๐Ÿฆ‹', - '๐Ÿ›', - '๐Ÿœ', - '๐Ÿ', - '๐Ÿž', - '๐Ÿฆ—', - '๐Ÿ•ท๏ธ', - '๐Ÿ•ท', - '๐Ÿ•ธ๏ธ', - '๐Ÿ•ธ', - '๐Ÿฆ‚', - '๐ŸฆŸ', - '๐Ÿฆ ', - '๐Ÿ’', - '๐ŸŒธ', - '๐Ÿ’ฎ', - '๐Ÿต๏ธ', - '๐Ÿต', - '๐ŸŒน', - '๐Ÿฅ€', - '๐ŸŒบ', - '๐ŸŒป', - '๐ŸŒผ', - '๐ŸŒท', - '๐ŸŒฑ', - '๐ŸŒฒ', - '๐ŸŒณ', - '๐ŸŒด', - '๐ŸŒต', - '๐ŸŒพ', - '๐ŸŒฟ', - 'โ˜˜๏ธ', - 'โ˜˜', - '๐Ÿ€', - '๐Ÿ', - '๐Ÿ‚', - '๐Ÿƒ', - '๐Ÿ‡', - '๐Ÿˆ', - '๐Ÿ‰', - '๐ŸŠ', - '๐Ÿ‹', - '๐ŸŒ', - '๐Ÿ', - '๐Ÿฅญ', - '๐ŸŽ', - '๐Ÿ', - '๐Ÿ', - '๐Ÿ‘', - '๐Ÿ’', - '๐Ÿ“', - '๐Ÿฅ', - '๐Ÿ…', - '๐Ÿฅฅ', - '๐Ÿฅ‘', - '๐Ÿ†', - '๐Ÿฅ”', - '๐Ÿฅ•', - '๐ŸŒฝ', - '๐ŸŒถ๏ธ', - '๐ŸŒถ', - '๐Ÿฅ’', - '๐Ÿฅฌ', - '๐Ÿฅฆ', - '๐Ÿง„', - '๐Ÿง…', - '๐Ÿ„', - '๐Ÿฅœ', - '๐ŸŒฐ', - '๐Ÿž', - '๐Ÿฅ', - '๐Ÿฅ–', - '๐Ÿฅจ', - '๐Ÿฅฏ', - '๐Ÿฅž', - '๐Ÿง‡', - '๐Ÿง€', - '๐Ÿ–', - '๐Ÿ—', - '๐Ÿฅฉ', - '๐Ÿฅ“', - '๐Ÿ”', - '๐ŸŸ', - '๐Ÿ•', - '๐ŸŒญ', - '๐Ÿฅช', - '๐ŸŒฎ', - '๐ŸŒฏ', - '๐Ÿฅ™', - '๐Ÿง†', - '๐Ÿฅš', - '๐Ÿณ', - '๐Ÿฅ˜', - '๐Ÿฒ', - '๐Ÿฅฃ', - '๐Ÿฅ—', - '๐Ÿฟ', - '๐Ÿงˆ', - '๐Ÿง‚', - '๐Ÿฅซ', - '๐Ÿฑ', - '๐Ÿ˜', - '๐Ÿ™', - '๐Ÿš', - '๐Ÿ›', - '๐Ÿœ', - '๐Ÿ', - '๐Ÿ ', - '๐Ÿข', - '๐Ÿฃ', - '๐Ÿค', - '๐Ÿฅ', - '๐Ÿฅฎ', - '๐Ÿก', - '๐ŸฅŸ', - '๐Ÿฅ ', - '๐Ÿฅก', - '๐Ÿฆ€', - '๐Ÿฆž', - '๐Ÿฆ', - '๐Ÿฆ‘', - '๐Ÿฆช', - '๐Ÿฆ', - '๐Ÿง', - '๐Ÿจ', - '๐Ÿฉ', - '๐Ÿช', - '๐ŸŽ‚', - '๐Ÿฐ', - '๐Ÿง', - '๐Ÿฅง', - '๐Ÿซ', - '๐Ÿฌ', - '๐Ÿญ', - '๐Ÿฎ', - '๐Ÿฏ', - '๐Ÿผ', - '๐Ÿฅ›', - 'โ˜•', - '๐Ÿต', - '๐Ÿถ', - '๐Ÿพ', - '๐Ÿท', - '๐Ÿธ', - '๐Ÿน', - '๐Ÿบ', - '๐Ÿป', - '๐Ÿฅ‚', - '๐Ÿฅƒ', - '๐Ÿฅค', - '๐Ÿงƒ', - '๐Ÿง‰', - '๐ŸงŠ', - '๐Ÿฅข', - '๐Ÿฝ๏ธ', - '๐Ÿฝ', - '๐Ÿด', - '๐Ÿฅ„', - '๐Ÿ”ช', - '๐Ÿบ', - '๐ŸŒ', - '๐ŸŒŽ', - '๐ŸŒ', - '๐ŸŒ', - '๐Ÿ—บ๏ธ', - '๐Ÿ—บ', - '๐Ÿ—พ', - '๐Ÿงญ', - '๐Ÿ”๏ธ', - '๐Ÿ”', - 'โ›ฐ๏ธ', - 'โ›ฐ', - '๐ŸŒ‹', - '๐Ÿ—ป', - '๐Ÿ•๏ธ', - '๐Ÿ•', - '๐Ÿ–๏ธ', - '๐Ÿ–', - '๐Ÿœ๏ธ', - '๐Ÿœ', - '๐Ÿ๏ธ', - '๐Ÿ', - '๐Ÿž๏ธ', - '๐Ÿž', - '๐ŸŸ๏ธ', - '๐ŸŸ', - '๐Ÿ›๏ธ', - '๐Ÿ›', - '๐Ÿ—๏ธ', - '๐Ÿ—', - '๐Ÿงฑ', - '๐Ÿ˜๏ธ', - '๐Ÿ˜', - '๐Ÿš๏ธ', - '๐Ÿš', - '๐Ÿ ', - '๐Ÿก', - '๐Ÿข', - '๐Ÿฃ', - '๐Ÿค', - '๐Ÿฅ', - '๐Ÿฆ', - '๐Ÿจ', - '๐Ÿฉ', - '๐Ÿช', - '๐Ÿซ', - '๐Ÿฌ', - '๐Ÿญ', - 
'๐Ÿฏ', - '๐Ÿฐ', - '๐Ÿ’’', - '๐Ÿ—ผ', - '๐Ÿ—ฝ', - 'โ›ช', - '๐Ÿ•Œ', - '๐Ÿ›•', - '๐Ÿ•', - 'โ›ฉ๏ธ', - 'โ›ฉ', - '๐Ÿ•‹', - 'โ›ฒ', - 'โ›บ', - '๐ŸŒ', - '๐ŸŒƒ', - '๐Ÿ™๏ธ', - '๐Ÿ™', - '๐ŸŒ„', - '๐ŸŒ…', - '๐ŸŒ†', - '๐ŸŒ‡', - '๐ŸŒ‰', - 'โ™จ๏ธ', - 'โ™จ', - '๐ŸŽ ', - '๐ŸŽก', - '๐ŸŽข', - '๐Ÿ’ˆ', - '๐ŸŽช', - '๐Ÿš‚', - '๐Ÿšƒ', - '๐Ÿš„', - '๐Ÿš…', - '๐Ÿš†', - '๐Ÿš‡', - '๐Ÿšˆ', - '๐Ÿš‰', - '๐ŸšŠ', - '๐Ÿš', - '๐Ÿšž', - '๐Ÿš‹', - '๐ŸšŒ', - '๐Ÿš', - '๐ŸšŽ', - '๐Ÿš', - '๐Ÿš‘', - '๐Ÿš’', - '๐Ÿš“', - '๐Ÿš”', - '๐Ÿš•', - '๐Ÿš–', - '๐Ÿš—', - '๐Ÿš˜', - '๐Ÿš™', - '๐Ÿšš', - '๐Ÿš›', - '๐Ÿšœ', - '๐ŸŽ๏ธ', - '๐ŸŽ', - '๐Ÿ๏ธ', - '๐Ÿ', - '๐Ÿ›ต', - '๐Ÿฆฝ', - '๐Ÿฆผ', - '๐Ÿ›บ', - '๐Ÿšฒ', - '๐Ÿ›ด', - '๐Ÿ›น', - '๐Ÿš', - '๐Ÿ›ฃ๏ธ', - '๐Ÿ›ฃ', - '๐Ÿ›ค๏ธ', - '๐Ÿ›ค', - '๐Ÿ›ข๏ธ', - '๐Ÿ›ข', - 'โ›ฝ', - '๐Ÿšจ', - '๐Ÿšฅ', - '๐Ÿšฆ', - '๐Ÿ›‘', - '๐Ÿšง', - 'โš“', - 'โ›ต', - '๐Ÿ›ถ', - '๐Ÿšค', - '๐Ÿ›ณ๏ธ', - '๐Ÿ›ณ', - 'โ›ด๏ธ', - 'โ›ด', - '๐Ÿ›ฅ๏ธ', - '๐Ÿ›ฅ', - '๐Ÿšข', - 'โœˆ๏ธ', - 'โœˆ', - '๐Ÿ›ฉ๏ธ', - '๐Ÿ›ฉ', - '๐Ÿ›ซ', - '๐Ÿ›ฌ', - '๐Ÿช‚', - '๐Ÿ’บ', - '๐Ÿš', - '๐ŸšŸ', - '๐Ÿš ', - '๐Ÿšก', - '๐Ÿ›ฐ๏ธ', - '๐Ÿ›ฐ', - '๐Ÿš€', - '๐Ÿ›ธ', - '๐Ÿ›Ž๏ธ', - '๐Ÿ›Ž', - '๐Ÿงณ', - 'โŒ›', - 'โณ', - 'โŒš', - 'โฐ', - 'โฑ๏ธ', - 'โฑ', - 'โฒ๏ธ', - 'โฒ', - '๐Ÿ•ฐ๏ธ', - '๐Ÿ•ฐ', - '๐Ÿ•›', - '๐Ÿ•ง', - '๐Ÿ•', - '๐Ÿ•œ', - '๐Ÿ•‘', - '๐Ÿ•', - '๐Ÿ•’', - '๐Ÿ•ž', - '๐Ÿ•“', - '๐Ÿ•Ÿ', - '๐Ÿ•”', - '๐Ÿ• ', - '๐Ÿ••', - '๐Ÿ•ก', - '๐Ÿ•–', - '๐Ÿ•ข', - '๐Ÿ•—', - '๐Ÿ•ฃ', - '๐Ÿ•˜', - '๐Ÿ•ค', - '๐Ÿ•™', - '๐Ÿ•ฅ', - '๐Ÿ•š', - '๐Ÿ•ฆ', - '๐ŸŒ‘', - '๐ŸŒ’', - '๐ŸŒ“', - '๐ŸŒ”', - '๐ŸŒ•', - '๐ŸŒ–', - '๐ŸŒ—', - '๐ŸŒ˜', - '๐ŸŒ™', - '๐ŸŒš', - '๐ŸŒ›', - '๐ŸŒœ', - '๐ŸŒก๏ธ', - '๐ŸŒก', - 'โ˜€๏ธ', - 'โ˜€', - '๐ŸŒ', - '๐ŸŒž', - '๐Ÿช', - 'โญ', - '๐ŸŒŸ', - '๐ŸŒ ', - '๐ŸŒŒ', - 'โ˜๏ธ', - 'โ˜', - 'โ›…', - 'โ›ˆ๏ธ', - 'โ›ˆ', - '๐ŸŒค๏ธ', - '๐ŸŒค', - '๐ŸŒฅ๏ธ', - '๐ŸŒฅ', - '๐ŸŒฆ๏ธ', - '๐ŸŒฆ', - '๐ŸŒง๏ธ', - '๐ŸŒง', - '๐ŸŒจ๏ธ', - '๐ŸŒจ', - '๐ŸŒฉ๏ธ', - '๐ŸŒฉ', - '๐ŸŒช๏ธ', - '๐ŸŒช', - '๐ŸŒซ๏ธ', - '๐ŸŒซ', - '๐ŸŒฌ๏ธ', - '๐ŸŒฌ', - '๐ŸŒ€', - '๐ŸŒˆ', - '๐ŸŒ‚', - 'โ˜‚๏ธ', - 'โ˜‚', - 'โ˜”', - 'โ›ฑ๏ธ', - 'โ›ฑ', - 'โšก', - 'โ„๏ธ', - 'โ„', - 'โ˜ƒ๏ธ', - 'โ˜ƒ', - 'โ›„', - 'โ˜„๏ธ', - 'โ˜„', - '๐Ÿ”ฅ', - '๐Ÿ’ง', - '๐ŸŒŠ', - '๐ŸŽƒ', - '๐ŸŽ„', - '๐ŸŽ†', - '๐ŸŽ‡', - '๐Ÿงจ', - 'โœจ', - '๐ŸŽˆ', - '๐ŸŽ‰', - '๐ŸŽŠ', - '๐ŸŽ‹', - '๐ŸŽ', - '๐ŸŽŽ', - '๐ŸŽ', - '๐ŸŽ', - '๐ŸŽ‘', - '๐Ÿงง', - '๐ŸŽ€', - '๐ŸŽ', - '๐ŸŽ—๏ธ', - '๐ŸŽ—', - '๐ŸŽŸ๏ธ', - '๐ŸŽŸ', - '๐ŸŽซ', - '๐ŸŽ–๏ธ', - '๐ŸŽ–', - '๐Ÿ†', - '๐Ÿ…', - '๐Ÿฅ‡', - '๐Ÿฅˆ', - '๐Ÿฅ‰', - 'โšฝ', - 'โšพ', - '๐ŸฅŽ', - '๐Ÿ€', - '๐Ÿ', - '๐Ÿˆ', - '๐Ÿ‰', - '๐ŸŽพ', - '๐Ÿฅ', - '๐ŸŽณ', - '๐Ÿ', - '๐Ÿ‘', - '๐Ÿ’', - '๐Ÿฅ', - '๐Ÿ“', - '๐Ÿธ', - '๐ŸฅŠ', - '๐Ÿฅ‹', - '๐Ÿฅ…', - 'โ›ณ', - 'โ›ธ๏ธ', - 'โ›ธ', - '๐ŸŽฃ', - '๐Ÿคฟ', - '๐ŸŽฝ', - '๐ŸŽฟ', - '๐Ÿ›ท', - '๐ŸฅŒ', - '๐ŸŽฏ', - '๐Ÿช€', - '๐Ÿช', - '๐ŸŽฑ', - '๐Ÿ”ฎ', - '๐Ÿงฟ', - '๐ŸŽฎ', - '๐Ÿ•น๏ธ', - '๐Ÿ•น', - '๐ŸŽฐ', - '๐ŸŽฒ', - '๐Ÿงฉ', - '๐Ÿงธ', - 'โ™ ๏ธ', - 'โ™ ', - 'โ™ฅ๏ธ', - 'โ™ฅ', - 'โ™ฆ๏ธ', - 'โ™ฆ', - 'โ™ฃ๏ธ', - 'โ™ฃ', - 'โ™Ÿ๏ธ', - 'โ™Ÿ', - '๐Ÿƒ', - '๐Ÿ€„', - '๐ŸŽด', - '๐ŸŽญ', - '๐Ÿ–ผ๏ธ', - '๐Ÿ–ผ', - '๐ŸŽจ', - '๐Ÿงต', - '๐Ÿงถ', - '๐Ÿ‘“', - '๐Ÿ•ถ๏ธ', - '๐Ÿ•ถ', - '๐Ÿฅฝ', - '๐Ÿฅผ', - '๐Ÿฆบ', - '๐Ÿ‘”', - '๐Ÿ‘•', - '๐Ÿ‘–', - '๐Ÿงฃ', - '๐Ÿงค', - '๐Ÿงฅ', - '๐Ÿงฆ', - '๐Ÿ‘—', - '๐Ÿ‘˜', - '๐Ÿฅป', - '๐Ÿฉฑ', - '๐Ÿฉฒ', - '๐Ÿฉณ', - '๐Ÿ‘™', - '๐Ÿ‘š', - '๐Ÿ‘›', - '๐Ÿ‘œ', - '๐Ÿ‘', - '๐Ÿ›๏ธ', - '๐Ÿ›', - '๐ŸŽ’', - '๐Ÿ‘ž', - '๐Ÿ‘Ÿ', - '๐Ÿฅพ', - '๐Ÿฅฟ', - '๐Ÿ‘ ', - '๐Ÿ‘ก', - '๐Ÿฉฐ', - '๐Ÿ‘ข', - '๐Ÿ‘‘', - '๐Ÿ‘’', - '๐ŸŽฉ', - '๐ŸŽ“', - '๐Ÿงข', - 'โ›‘๏ธ', - 'โ›‘', - '๐Ÿ“ฟ', - '๐Ÿ’„', - '๐Ÿ’', - 
'๐Ÿ’Ž', - '๐Ÿ”‡', - '๐Ÿ”ˆ', - '๐Ÿ”‰', - '๐Ÿ”Š', - '๐Ÿ“ข', - '๐Ÿ“ฃ', - '๐Ÿ“ฏ', - '๐Ÿ””', - '๐Ÿ”•', - '๐ŸŽผ', - '๐ŸŽต', - '๐ŸŽถ', - '๐ŸŽ™๏ธ', - '๐ŸŽ™', - '๐ŸŽš๏ธ', - '๐ŸŽš', - '๐ŸŽ›๏ธ', - '๐ŸŽ›', - '๐ŸŽค', - '๐ŸŽง', - '๐Ÿ“ป', - '๐ŸŽท', - '๐ŸŽธ', - '๐ŸŽน', - '๐ŸŽบ', - '๐ŸŽป', - '๐Ÿช•', - '๐Ÿฅ', - '๐Ÿ“ฑ', - '๐Ÿ“ฒ', - 'โ˜Ž๏ธ', - 'โ˜Ž', - '๐Ÿ“ž', - '๐Ÿ“Ÿ', - '๐Ÿ“ ', - '๐Ÿ”‹', - '๐Ÿ”Œ', - '๐Ÿ’ป', - '๐Ÿ–ฅ๏ธ', - '๐Ÿ–ฅ', - '๐Ÿ–จ๏ธ', - '๐Ÿ–จ', - 'โŒจ๏ธ', - 'โŒจ', - '๐Ÿ–ฑ๏ธ', - '๐Ÿ–ฑ', - '๐Ÿ–ฒ๏ธ', - '๐Ÿ–ฒ', - '๐Ÿ’ฝ', - '๐Ÿ’พ', - '๐Ÿ’ฟ', - '๐Ÿ“€', - '๐Ÿงฎ', - '๐ŸŽฅ', - '๐ŸŽž๏ธ', - '๐ŸŽž', - '๐Ÿ“ฝ๏ธ', - '๐Ÿ“ฝ', - '๐ŸŽฌ', - '๐Ÿ“บ', - '๐Ÿ“ท', - '๐Ÿ“ธ', - '๐Ÿ“น', - '๐Ÿ“ผ', - '๐Ÿ”', - '๐Ÿ”Ž', - '๐Ÿ•ฏ๏ธ', - '๐Ÿ•ฏ', - '๐Ÿ’ก', - '๐Ÿ”ฆ', - '๐Ÿฎ', - '๐Ÿช”', - '๐Ÿ“”', - '๐Ÿ“•', - '๐Ÿ“–', - '๐Ÿ“—', - '๐Ÿ“˜', - '๐Ÿ“™', - '๐Ÿ“š', - '๐Ÿ““', - '๐Ÿ“’', - '๐Ÿ“ƒ', - '๐Ÿ“œ', - '๐Ÿ“„', - '๐Ÿ“ฐ', - '๐Ÿ—ž๏ธ', - '๐Ÿ—ž', - '๐Ÿ“‘', - '๐Ÿ”–', - '๐Ÿท๏ธ', - '๐Ÿท', - '๐Ÿ’ฐ', - '๐Ÿ’ด', - '๐Ÿ’ต', - '๐Ÿ’ถ', - '๐Ÿ’ท', - '๐Ÿ’ธ', - '๐Ÿ’ณ', - '๐Ÿงพ', - '๐Ÿ’น', - '๐Ÿ’ฑ', - '๐Ÿ’ฒ', - 'โœ‰๏ธ', - 'โœ‰', - '๐Ÿ“ง', - '๐Ÿ“จ', - '๐Ÿ“ฉ', - '๐Ÿ“ค', - '๐Ÿ“ฅ', - '๐Ÿ“ฆ', - '๐Ÿ“ซ', - '๐Ÿ“ช', - '๐Ÿ“ฌ', - '๐Ÿ“ญ', - '๐Ÿ“ฎ', - '๐Ÿ—ณ๏ธ', - '๐Ÿ—ณ', - 'โœ๏ธ', - 'โœ', - 'โœ’๏ธ', - 'โœ’', - '๐Ÿ–‹๏ธ', - '๐Ÿ–‹', - '๐Ÿ–Š๏ธ', - '๐Ÿ–Š', - '๐Ÿ–Œ๏ธ', - '๐Ÿ–Œ', - '๐Ÿ–๏ธ', - '๐Ÿ–', - '๐Ÿ“', - '๐Ÿ’ผ', - '๐Ÿ“', - '๐Ÿ“‚', - '๐Ÿ—‚๏ธ', - '๐Ÿ—‚', - '๐Ÿ“…', - '๐Ÿ“†', - '๐Ÿ—’๏ธ', - '๐Ÿ—’', - '๐Ÿ—“๏ธ', - '๐Ÿ—“', - '๐Ÿ“‡', - '๐Ÿ“ˆ', - '๐Ÿ“‰', - '๐Ÿ“Š', - '๐Ÿ“‹', - '๐Ÿ“Œ', - '๐Ÿ“', - '๐Ÿ“Ž', - '๐Ÿ–‡๏ธ', - '๐Ÿ–‡', - '๐Ÿ“', - '๐Ÿ“', - 'โœ‚๏ธ', - 'โœ‚', - '๐Ÿ—ƒ๏ธ', - '๐Ÿ—ƒ', - '๐Ÿ—„๏ธ', - '๐Ÿ—„', - '๐Ÿ—‘๏ธ', - '๐Ÿ—‘', - '๐Ÿ”’', - '๐Ÿ”“', - '๐Ÿ”', - '๐Ÿ”', - '๐Ÿ”‘', - '๐Ÿ—๏ธ', - '๐Ÿ—', - '๐Ÿ”จ', - '๐Ÿช“', - 'โ›๏ธ', - 'โ›', - 'โš’๏ธ', - 'โš’', - '๐Ÿ› ๏ธ', - '๐Ÿ› ', - '๐Ÿ—ก๏ธ', - '๐Ÿ—ก', - 'โš”๏ธ', - 'โš”', - '๐Ÿ”ซ', - '๐Ÿน', - '๐Ÿ›ก๏ธ', - '๐Ÿ›ก', - '๐Ÿ”ง', - '๐Ÿ”ฉ', - 'โš™๏ธ', - 'โš™', - '๐Ÿ—œ๏ธ', - '๐Ÿ—œ', - 'โš–๏ธ', - 'โš–', - '๐Ÿฆฏ', - '๐Ÿ”—', - 'โ›“๏ธ', - 'โ›“', - '๐Ÿงฐ', - '๐Ÿงฒ', - 'โš—๏ธ', - 'โš—', - '๐Ÿงช', - '๐Ÿงซ', - '๐Ÿงฌ', - '๐Ÿ”ฌ', - '๐Ÿ”ญ', - '๐Ÿ“ก', - '๐Ÿ’‰', - '๐Ÿฉธ', - '๐Ÿ’Š', - '๐Ÿฉน', - '๐Ÿฉบ', - '๐Ÿšช', - '๐Ÿ›๏ธ', - '๐Ÿ›', - '๐Ÿ›‹๏ธ', - '๐Ÿ›‹', - '๐Ÿช‘', - '๐Ÿšฝ', - '๐Ÿšฟ', - '๐Ÿ›', - '๐Ÿช’', - '๐Ÿงด', - '๐Ÿงท', - '๐Ÿงน', - '๐Ÿงบ', - '๐Ÿงป', - '๐Ÿงผ', - '๐Ÿงฝ', - '๐Ÿงฏ', - '๐Ÿ›’', - '๐Ÿšฌ', - 'โšฐ๏ธ', - 'โšฐ', - 'โšฑ๏ธ', - 'โšฑ', - '๐Ÿ—ฟ', - '๐Ÿง', - '๐Ÿšฎ', - '๐Ÿšฐ', - 'โ™ฟ', - '๐Ÿšน', - '๐Ÿšบ', - '๐Ÿšป', - '๐Ÿšผ', - '๐Ÿšพ', - '๐Ÿ›‚', - '๐Ÿ›ƒ', - '๐Ÿ›„', - '๐Ÿ›…', - 'โš ๏ธ', - 'โš ', - '๐Ÿšธ', - 'โ›”', - '๐Ÿšซ', - '๐Ÿšณ', - '๐Ÿšญ', - '๐Ÿšฏ', - '๐Ÿšฑ', - '๐Ÿšท', - '๐Ÿ“ต', - '๐Ÿ”ž', - 'โ˜ข๏ธ', - 'โ˜ข', - 'โ˜ฃ๏ธ', - 'โ˜ฃ', - 'โฌ†๏ธ', - 'โฌ†', - 'โ†—๏ธ', - 'โ†—', - 'โžก๏ธ', - 'โžก', - 'โ†˜๏ธ', - 'โ†˜', - 'โฌ‡๏ธ', - 'โฌ‡', - 'โ†™๏ธ', - 'โ†™', - 'โฌ…๏ธ', - 'โฌ…', - 'โ†–๏ธ', - 'โ†–', - 'โ†•๏ธ', - 'โ†•', - 'โ†”๏ธ', - 'โ†”', - 'โ†ฉ๏ธ', - 'โ†ฉ', - 'โ†ช๏ธ', - 'โ†ช', - 'โคด๏ธ', - 'โคด', - 'โคต๏ธ', - 'โคต', - '๐Ÿ”ƒ', - '๐Ÿ”„', - '๐Ÿ”™', - '๐Ÿ”š', - '๐Ÿ”›', - '๐Ÿ”œ', - '๐Ÿ”', - '๐Ÿ›', - 'โš›๏ธ', - 'โš›', - '๐Ÿ•‰๏ธ', - '๐Ÿ•‰', - 'โœก๏ธ', - 'โœก', - 'โ˜ธ๏ธ', - 'โ˜ธ', - 'โ˜ฏ๏ธ', - 'โ˜ฏ', - 'โœ๏ธ', - 'โœ', - 'โ˜ฆ๏ธ', - 'โ˜ฆ', - 'โ˜ช๏ธ', - 'โ˜ช', - 'โ˜ฎ๏ธ', - 'โ˜ฎ', - '๐Ÿ•Ž', - '๐Ÿ”ฏ', - 'โ™ˆ', - 'โ™‰', - 'โ™Š', - 'โ™‹', - 'โ™Œ', - 'โ™', - 'โ™Ž', - 'โ™', - 'โ™', - 'โ™‘', - 'โ™’', - 'โ™“', - 'โ›Ž', - '๐Ÿ”€', - '๐Ÿ”', - '๐Ÿ”‚', - 'โ–ถ๏ธ', - 'โ–ถ', - 'โฉ', - 'โญ๏ธ', - 'โญ', - 
'โฏ๏ธ', - 'โฏ', - 'โ—€๏ธ', - 'โ—€', - 'โช', - 'โฎ๏ธ', - 'โฎ', - '๐Ÿ”ผ', - 'โซ', - '๐Ÿ”ฝ', - 'โฌ', - 'โธ๏ธ', - 'โธ', - 'โน๏ธ', - 'โน', - 'โบ๏ธ', - 'โบ', - 'โ๏ธ', - 'โ', - '๐ŸŽฆ', - '๐Ÿ”…', - '๐Ÿ”†', - '๐Ÿ“ถ', - '๐Ÿ“ณ', - '๐Ÿ“ด', - 'โ™€๏ธ', - 'โ™€', - 'โ™‚๏ธ', - 'โ™‚', - 'โš•๏ธ', - 'โš•', - 'โ™พ๏ธ', - 'โ™พ', - 'โ™ป๏ธ', - 'โ™ป', - 'โšœ๏ธ', - 'โšœ', - '๐Ÿ”ฑ', - '๐Ÿ“›', - '๐Ÿ”ฐ', - 'โญ•', - 'โœ…', - 'โ˜‘๏ธ', - 'โ˜‘', - 'โœ”๏ธ', - 'โœ”', - 'โœ–๏ธ', - 'โœ–', - 'โŒ', - 'โŽ', - 'โž•', - 'โž–', - 'โž—', - 'โžฐ', - 'โžฟ', - 'ใ€ฝ๏ธ', - 'ใ€ฝ', - 'โœณ๏ธ', - 'โœณ', - 'โœด๏ธ', - 'โœด', - 'โ‡๏ธ', - 'โ‡', - 'โ€ผ๏ธ', - 'โ€ผ', - 'โ‰๏ธ', - 'โ‰', - 'โ“', - 'โ”', - 'โ•', - 'โ—', - 'ใ€ฐ๏ธ', - 'ใ€ฐ', - 'ยฉ๏ธ', - 'ยฉ', - 'ยฎ๏ธ', - 'ยฎ', - 'โ„ข๏ธ', - 'โ„ข', - '#๏ธโƒฃ', - '#โƒฃ', - '*๏ธโƒฃ', - '*โƒฃ', - '0๏ธโƒฃ', - '0โƒฃ', - '1๏ธโƒฃ', - '1โƒฃ', - '2๏ธโƒฃ', - '2โƒฃ', - '3๏ธโƒฃ', - '3โƒฃ', - '4๏ธโƒฃ', - '4โƒฃ', - '5๏ธโƒฃ', - '5โƒฃ', - '6๏ธโƒฃ', - '6โƒฃ', - '7๏ธโƒฃ', - '7โƒฃ', - '8๏ธโƒฃ', - '8โƒฃ', - '9๏ธโƒฃ', - '9โƒฃ', - '๐Ÿ”Ÿ', - '๐Ÿ” ', - '๐Ÿ”ก', - '๐Ÿ”ข', - '๐Ÿ”ฃ', - '๐Ÿ”ค', - '๐Ÿ…ฐ๏ธ', - '๐Ÿ…ฐ', - '๐Ÿ†Ž', - '๐Ÿ…ฑ๏ธ', - '๐Ÿ…ฑ', - '๐Ÿ†‘', - '๐Ÿ†’', - '๐Ÿ†“', - 'โ„น๏ธ', - 'โ„น', - '๐Ÿ†”', - 'โ“‚๏ธ', - 'โ“‚', - '๐Ÿ†•', - '๐Ÿ†–', - '๐Ÿ…พ๏ธ', - '๐Ÿ…พ', - '๐Ÿ†—', - '๐Ÿ…ฟ๏ธ', - '๐Ÿ…ฟ', - '๐Ÿ†˜', - '๐Ÿ†™', - '๐Ÿ†š', - '๐Ÿˆ', - '๐Ÿˆ‚๏ธ', - '๐Ÿˆ‚', - '๐Ÿˆท๏ธ', - '๐Ÿˆท', - '๐Ÿˆถ', - '๐Ÿˆฏ', - '๐Ÿ‰', - '๐Ÿˆน', - '๐Ÿˆš', - '๐Ÿˆฒ', - '๐Ÿ‰‘', - '๐Ÿˆธ', - '๐Ÿˆด', - '๐Ÿˆณ', - 'ใŠ—๏ธ', - 'ใŠ—', - 'ใŠ™๏ธ', - 'ใŠ™', - '๐Ÿˆบ', - '๐Ÿˆต', - '๐Ÿ”ด', - '๐ŸŸ ', - '๐ŸŸก', - '๐ŸŸข', - '๐Ÿ”ต', - '๐ŸŸฃ', - '๐ŸŸค', - 'โšซ', - 'โšช', - '๐ŸŸฅ', - '๐ŸŸง', - '๐ŸŸจ', - '๐ŸŸฉ', - '๐ŸŸฆ', - '๐ŸŸช', - '๐ŸŸซ', - 'โฌ›', - 'โฌœ', - 'โ—ผ๏ธ', - 'โ—ผ', - 'โ—ป๏ธ', - 'โ—ป', - 'โ—พ', - 'โ—ฝ', - 'โ–ช๏ธ', - 'โ–ช', - 'โ–ซ๏ธ', - 'โ–ซ', - '๐Ÿ”ถ', - '๐Ÿ”ท', - '๐Ÿ”ธ', - '๐Ÿ”น', - '๐Ÿ”บ', - '๐Ÿ”ป', - '๐Ÿ’ ', - '๐Ÿ”˜', - '๐Ÿ”ณ', - '๐Ÿ”ฒ', - '๐Ÿ', - '๐Ÿšฉ', - '๐ŸŽŒ', - '๐Ÿด', - '๐Ÿณ๏ธ', - '๐Ÿณ', - '๐Ÿณ๏ธโ€๐ŸŒˆ', - '๐Ÿณโ€๐ŸŒˆ', - '๐Ÿดโ€โ˜ ๏ธ', - '๐Ÿดโ€โ˜ ', - '๐Ÿ‡ฆ๐Ÿ‡จ', - '๐Ÿ‡ฆ๐Ÿ‡ฉ', - '๐Ÿ‡ฆ๐Ÿ‡ช', - '๐Ÿ‡ฆ๐Ÿ‡ซ', - '๐Ÿ‡ฆ๐Ÿ‡ฌ', - '๐Ÿ‡ฆ๐Ÿ‡ฎ', - '๐Ÿ‡ฆ๐Ÿ‡ฑ', - '๐Ÿ‡ฆ๐Ÿ‡ฒ', - '๐Ÿ‡ฆ๐Ÿ‡ด', - '๐Ÿ‡ฆ๐Ÿ‡ถ', - '๐Ÿ‡ฆ๐Ÿ‡ท', - '๐Ÿ‡ฆ๐Ÿ‡ธ', - '๐Ÿ‡ฆ๐Ÿ‡น', - '๐Ÿ‡ฆ๐Ÿ‡บ', - '๐Ÿ‡ฆ๐Ÿ‡ผ', - '๐Ÿ‡ฆ๐Ÿ‡ฝ', - '๐Ÿ‡ฆ๐Ÿ‡ฟ', - '๐Ÿ‡ง๐Ÿ‡ฆ', - '๐Ÿ‡ง๐Ÿ‡ง', - '๐Ÿ‡ง๐Ÿ‡ฉ', - '๐Ÿ‡ง๐Ÿ‡ช', - '๐Ÿ‡ง๐Ÿ‡ซ', - '๐Ÿ‡ง๐Ÿ‡ฌ', - '๐Ÿ‡ง๐Ÿ‡ญ', - '๐Ÿ‡ง๐Ÿ‡ฎ', - '๐Ÿ‡ง๐Ÿ‡ฏ', - '๐Ÿ‡ง๐Ÿ‡ฑ', - '๐Ÿ‡ง๐Ÿ‡ฒ', - '๐Ÿ‡ง๐Ÿ‡ณ', - '๐Ÿ‡ง๐Ÿ‡ด', - '๐Ÿ‡ง๐Ÿ‡ถ', - '๐Ÿ‡ง๐Ÿ‡ท', - '๐Ÿ‡ง๐Ÿ‡ธ', - '๐Ÿ‡ง๐Ÿ‡น', - '๐Ÿ‡ง๐Ÿ‡ป', - '๐Ÿ‡ง๐Ÿ‡ผ', - '๐Ÿ‡ง๐Ÿ‡พ', - '๐Ÿ‡ง๐Ÿ‡ฟ', - '๐Ÿ‡จ๐Ÿ‡ฆ', - '๐Ÿ‡จ๐Ÿ‡จ', - '๐Ÿ‡จ๐Ÿ‡ฉ', - '๐Ÿ‡จ๐Ÿ‡ซ', - '๐Ÿ‡จ๐Ÿ‡ฌ', - '๐Ÿ‡จ๐Ÿ‡ญ', - '๐Ÿ‡จ๐Ÿ‡ฎ', - '๐Ÿ‡จ๐Ÿ‡ฐ', - '๐Ÿ‡จ๐Ÿ‡ฑ', - '๐Ÿ‡จ๐Ÿ‡ฒ', - '๐Ÿ‡จ๐Ÿ‡ณ', - '๐Ÿ‡จ๐Ÿ‡ด', - '๐Ÿ‡จ๐Ÿ‡ต', - '๐Ÿ‡จ๐Ÿ‡ท', - '๐Ÿ‡จ๐Ÿ‡บ', - '๐Ÿ‡จ๐Ÿ‡ป', - '๐Ÿ‡จ๐Ÿ‡ผ', - '๐Ÿ‡จ๐Ÿ‡ฝ', - '๐Ÿ‡จ๐Ÿ‡พ', - '๐Ÿ‡จ๐Ÿ‡ฟ', - '๐Ÿ‡ฉ๐Ÿ‡ช', - '๐Ÿ‡ฉ๐Ÿ‡ฌ', - '๐Ÿ‡ฉ๐Ÿ‡ฏ', - '๐Ÿ‡ฉ๐Ÿ‡ฐ', - '๐Ÿ‡ฉ๐Ÿ‡ฒ', - '๐Ÿ‡ฉ๐Ÿ‡ด', - '๐Ÿ‡ฉ๐Ÿ‡ฟ', - '๐Ÿ‡ช๐Ÿ‡ฆ', - '๐Ÿ‡ช๐Ÿ‡จ', - '๐Ÿ‡ช๐Ÿ‡ช', - '๐Ÿ‡ช๐Ÿ‡ฌ', - '๐Ÿ‡ช๐Ÿ‡ญ', - '๐Ÿ‡ช๐Ÿ‡ท', - '๐Ÿ‡ช๐Ÿ‡ธ', - '๐Ÿ‡ช๐Ÿ‡น', - '๐Ÿ‡ช๐Ÿ‡บ', - '๐Ÿ‡ซ๐Ÿ‡ฎ', - '๐Ÿ‡ซ๐Ÿ‡ฏ', - '๐Ÿ‡ซ๐Ÿ‡ฐ', - '๐Ÿ‡ซ๐Ÿ‡ฒ', - '๐Ÿ‡ซ๐Ÿ‡ด', - '๐Ÿ‡ซ๐Ÿ‡ท', - '๐Ÿ‡ฌ๐Ÿ‡ฆ', - '๐Ÿ‡ฌ๐Ÿ‡ง', - '๐Ÿ‡ฌ๐Ÿ‡ฉ', - '๐Ÿ‡ฌ๐Ÿ‡ช', - '๐Ÿ‡ฌ๐Ÿ‡ซ', - '๐Ÿ‡ฌ๐Ÿ‡ฌ', - '๐Ÿ‡ฌ๐Ÿ‡ญ', - '๐Ÿ‡ฌ๐Ÿ‡ฎ', - '๐Ÿ‡ฌ๐Ÿ‡ฑ', - '๐Ÿ‡ฌ๐Ÿ‡ฒ', - '๐Ÿ‡ฌ๐Ÿ‡ณ', - '๐Ÿ‡ฌ๐Ÿ‡ต', - '๐Ÿ‡ฌ๐Ÿ‡ถ', - '๐Ÿ‡ฌ๐Ÿ‡ท', - '๐Ÿ‡ฌ๐Ÿ‡ธ', - '๐Ÿ‡ฌ๐Ÿ‡น', - '๐Ÿ‡ฌ๐Ÿ‡บ', - '๐Ÿ‡ฌ๐Ÿ‡ผ', - '๐Ÿ‡ฌ๐Ÿ‡พ', - '๐Ÿ‡ญ๐Ÿ‡ฐ', - '๐Ÿ‡ญ๐Ÿ‡ฒ', - '๐Ÿ‡ญ๐Ÿ‡ณ', - '๐Ÿ‡ญ๐Ÿ‡ท', - 
'๐Ÿ‡ญ๐Ÿ‡น', - '๐Ÿ‡ญ๐Ÿ‡บ', - '๐Ÿ‡ฎ๐Ÿ‡จ', - '๐Ÿ‡ฎ๐Ÿ‡ฉ', - '๐Ÿ‡ฎ๐Ÿ‡ช', - '๐Ÿ‡ฎ๐Ÿ‡ฑ', - '๐Ÿ‡ฎ๐Ÿ‡ฒ', - '๐Ÿ‡ฎ๐Ÿ‡ณ', - '๐Ÿ‡ฎ๐Ÿ‡ด', - '๐Ÿ‡ฎ๐Ÿ‡ถ', - '๐Ÿ‡ฎ๐Ÿ‡ท', - '๐Ÿ‡ฎ๐Ÿ‡ธ', - '๐Ÿ‡ฎ๐Ÿ‡น', - '๐Ÿ‡ฏ๐Ÿ‡ช', - '๐Ÿ‡ฏ๐Ÿ‡ฒ', - '๐Ÿ‡ฏ๐Ÿ‡ด', - '๐Ÿ‡ฏ๐Ÿ‡ต', - '๐Ÿ‡ฐ๐Ÿ‡ช', - '๐Ÿ‡ฐ๐Ÿ‡ฌ', - '๐Ÿ‡ฐ๐Ÿ‡ญ', - '๐Ÿ‡ฐ๐Ÿ‡ฎ', - '๐Ÿ‡ฐ๐Ÿ‡ฒ', - '๐Ÿ‡ฐ๐Ÿ‡ณ', - '๐Ÿ‡ฐ๐Ÿ‡ต', - '๐Ÿ‡ฐ๐Ÿ‡ท', - '๐Ÿ‡ฐ๐Ÿ‡ผ', - '๐Ÿ‡ฐ๐Ÿ‡พ', - '๐Ÿ‡ฐ๐Ÿ‡ฟ', - '๐Ÿ‡ฑ๐Ÿ‡ฆ', - '๐Ÿ‡ฑ๐Ÿ‡ง', - '๐Ÿ‡ฑ๐Ÿ‡จ', - '๐Ÿ‡ฑ๐Ÿ‡ฎ', - '๐Ÿ‡ฑ๐Ÿ‡ฐ', - '๐Ÿ‡ฑ๐Ÿ‡ท', - '๐Ÿ‡ฑ๐Ÿ‡ธ', - '๐Ÿ‡ฑ๐Ÿ‡น', - '๐Ÿ‡ฑ๐Ÿ‡บ', - '๐Ÿ‡ฑ๐Ÿ‡ป', - '๐Ÿ‡ฑ๐Ÿ‡พ', - '๐Ÿ‡ฒ๐Ÿ‡ฆ', - '๐Ÿ‡ฒ๐Ÿ‡จ', - '๐Ÿ‡ฒ๐Ÿ‡ฉ', - '๐Ÿ‡ฒ๐Ÿ‡ช', - '๐Ÿ‡ฒ๐Ÿ‡ซ', - '๐Ÿ‡ฒ๐Ÿ‡ฌ', - '๐Ÿ‡ฒ๐Ÿ‡ญ', - '๐Ÿ‡ฒ๐Ÿ‡ฐ', - '๐Ÿ‡ฒ๐Ÿ‡ฑ', - '๐Ÿ‡ฒ๐Ÿ‡ฒ', - '๐Ÿ‡ฒ๐Ÿ‡ณ', - '๐Ÿ‡ฒ๐Ÿ‡ด', - '๐Ÿ‡ฒ๐Ÿ‡ต', - '๐Ÿ‡ฒ๐Ÿ‡ถ', - '๐Ÿ‡ฒ๐Ÿ‡ท', - '๐Ÿ‡ฒ๐Ÿ‡ธ', - '๐Ÿ‡ฒ๐Ÿ‡น', - '๐Ÿ‡ฒ๐Ÿ‡บ', - '๐Ÿ‡ฒ๐Ÿ‡ป', - '๐Ÿ‡ฒ๐Ÿ‡ผ', - '๐Ÿ‡ฒ๐Ÿ‡ฝ', - '๐Ÿ‡ฒ๐Ÿ‡พ', - '๐Ÿ‡ฒ๐Ÿ‡ฟ', - '๐Ÿ‡ณ๐Ÿ‡ฆ', - '๐Ÿ‡ณ๐Ÿ‡จ', - '๐Ÿ‡ณ๐Ÿ‡ช', - '๐Ÿ‡ณ๐Ÿ‡ซ', - '๐Ÿ‡ณ๐Ÿ‡ฌ', - '๐Ÿ‡ณ๐Ÿ‡ฎ', - '๐Ÿ‡ณ๐Ÿ‡ฑ', - '๐Ÿ‡ณ๐Ÿ‡ด', - '๐Ÿ‡ณ๐Ÿ‡ต', - '๐Ÿ‡ณ๐Ÿ‡ท', - '๐Ÿ‡ณ๐Ÿ‡บ', - '๐Ÿ‡ณ๐Ÿ‡ฟ', - '๐Ÿ‡ด๐Ÿ‡ฒ', - '๐Ÿ‡ต๐Ÿ‡ฆ', - '๐Ÿ‡ต๐Ÿ‡ช', - '๐Ÿ‡ต๐Ÿ‡ซ', - '๐Ÿ‡ต๐Ÿ‡ฌ', - '๐Ÿ‡ต๐Ÿ‡ญ', - '๐Ÿ‡ต๐Ÿ‡ฐ', - '๐Ÿ‡ต๐Ÿ‡ฑ', - '๐Ÿ‡ต๐Ÿ‡ฒ', - '๐Ÿ‡ต๐Ÿ‡ณ', - '๐Ÿ‡ต๐Ÿ‡ท', - '๐Ÿ‡ต๐Ÿ‡ธ', - '๐Ÿ‡ต๐Ÿ‡น', - '๐Ÿ‡ต๐Ÿ‡ผ', - '๐Ÿ‡ต๐Ÿ‡พ', - '๐Ÿ‡ถ๐Ÿ‡ฆ', - '๐Ÿ‡ท๐Ÿ‡ช', - '๐Ÿ‡ท๐Ÿ‡ด', - '๐Ÿ‡ท๐Ÿ‡ธ', - '๐Ÿ‡ท๐Ÿ‡บ', - '๐Ÿ‡ท๐Ÿ‡ผ', - '๐Ÿ‡ธ๐Ÿ‡ฆ', - '๐Ÿ‡ธ๐Ÿ‡ง', - '๐Ÿ‡ธ๐Ÿ‡จ', - '๐Ÿ‡ธ๐Ÿ‡ฉ', - '๐Ÿ‡ธ๐Ÿ‡ช', - '๐Ÿ‡ธ๐Ÿ‡ฌ', - '๐Ÿ‡ธ๐Ÿ‡ญ', - '๐Ÿ‡ธ๐Ÿ‡ฎ', - '๐Ÿ‡ธ๐Ÿ‡ฏ', - '๐Ÿ‡ธ๐Ÿ‡ฐ', - '๐Ÿ‡ธ๐Ÿ‡ฑ', - '๐Ÿ‡ธ๐Ÿ‡ฒ', - '๐Ÿ‡ธ๐Ÿ‡ณ', - '๐Ÿ‡ธ๐Ÿ‡ด', - '๐Ÿ‡ธ๐Ÿ‡ท', - '๐Ÿ‡ธ๐Ÿ‡ธ', - '๐Ÿ‡ธ๐Ÿ‡น', - '๐Ÿ‡ธ๐Ÿ‡ป', - '๐Ÿ‡ธ๐Ÿ‡ฝ', - '๐Ÿ‡ธ๐Ÿ‡พ', - '๐Ÿ‡ธ๐Ÿ‡ฟ', - '๐Ÿ‡น๐Ÿ‡ฆ', - '๐Ÿ‡น๐Ÿ‡จ', - '๐Ÿ‡น๐Ÿ‡ฉ', - '๐Ÿ‡น๐Ÿ‡ซ', - '๐Ÿ‡น๐Ÿ‡ฌ', - '๐Ÿ‡น๐Ÿ‡ญ', - '๐Ÿ‡น๐Ÿ‡ฏ', - '๐Ÿ‡น๐Ÿ‡ฐ', - '๐Ÿ‡น๐Ÿ‡ฑ', - '๐Ÿ‡น๐Ÿ‡ฒ', - '๐Ÿ‡น๐Ÿ‡ณ', - '๐Ÿ‡น๐Ÿ‡ด', - '๐Ÿ‡น๐Ÿ‡ท', - '๐Ÿ‡น๐Ÿ‡น', - '๐Ÿ‡น๐Ÿ‡ป', - '๐Ÿ‡น๐Ÿ‡ผ', - '๐Ÿ‡น๐Ÿ‡ฟ', - '๐Ÿ‡บ๐Ÿ‡ฆ', - '๐Ÿ‡บ๐Ÿ‡ฌ', - '๐Ÿ‡บ๐Ÿ‡ฒ', - '๐Ÿ‡บ๐Ÿ‡ณ', - '๐Ÿ‡บ๐Ÿ‡ธ', - '๐Ÿ‡บ๐Ÿ‡พ', - '๐Ÿ‡บ๐Ÿ‡ฟ', - '๐Ÿ‡ป๐Ÿ‡ฆ', - '๐Ÿ‡ป๐Ÿ‡จ', - '๐Ÿ‡ป๐Ÿ‡ช', - '๐Ÿ‡ป๐Ÿ‡ฌ', - '๐Ÿ‡ป๐Ÿ‡ฎ', - '๐Ÿ‡ป๐Ÿ‡ณ', - '๐Ÿ‡ป๐Ÿ‡บ', - '๐Ÿ‡ผ๐Ÿ‡ซ', - '๐Ÿ‡ผ๐Ÿ‡ธ', - '๐Ÿ‡ฝ๐Ÿ‡ฐ', - '๐Ÿ‡พ๐Ÿ‡ช', - '๐Ÿ‡พ๐Ÿ‡น', - '๐Ÿ‡ฟ๐Ÿ‡ฆ', - '๐Ÿ‡ฟ๐Ÿ‡ฒ', - '๐Ÿ‡ฟ๐Ÿ‡ผ', - '๐Ÿด๓ ง๓ ข๓ ฅ๓ ฎ๓ ง๓ ฟ', - '๐Ÿด๓ ง๓ ข๓ ณ๓ ฃ๓ ด๓ ฟ', - '๐Ÿด๓ ง๓ ข๓ ท๓ ฌ๓ ณ๓ ฟ' - ] - } - } - }, - 'additionalProperties': false -} \ No newline at end of file diff --git a/crawler/media.js b/crawler/media.js deleted file mode 100644 index 7d018a7a..00000000 --- a/crawler/media.js +++ /dev/null @@ -1,450 +0,0 @@ -const assert = require('assert') -const {URL} = require('url') -const Events = require('events') -const Ajv = require('ajv') -const logger = require('../logger').child({category: 'crawler', dataset: 'media'}) -const db = require('../dbs/profile-data-db') -const crawler = require('./index') -const lock = require('../lib/lock') -const knex = require('../lib/knex') -const siteDescriptions = require('./site-descriptions') -const {doCrawl, doCheckpoint, emitProgressEvent, getMatchingChangesInOrder, generateTimeFilename, ensureDirectory, normalizeSchemaUrl, toOrigin} = require('./util') -const mediaSchema = require('./json-schemas/media') - -// constants -// = - -const TABLE_VERSION = 1 -const JSON_TYPE = 'unwalled.garden/media' -const JSON_PATH_REGEX = /^\/data\/media\/([^/]+)\.json$/i - -// typedefs -// = - -/** - * @typedef {import('../dat/library').InternalDatArchive} InternalDatArchive - * @typedef {import('./util').CrawlSourceRecord} CrawlSourceRecord - * @typedef { import("./site-descriptions").SiteDescription } SiteDescription - * - 
* @typedef {Object} Media - * @prop {string} pathname - * @prop {string} subtype - * @prop {string} href - * @prop {string} title - * @prop {string} description - * @prop {string[]} tags - * @prop {string} createdAt - * @prop {string} updatedAt - * @prop {SiteDescription} author - * @prop {string} visibility - */ - -// globals -// = - -const events = new Events() -const ajv = (new Ajv()) -const validateMedia = ajv.compile(mediaSchema) - -// exported api -// = - -exports.on = events.on.bind(events) -exports.addListener = events.addListener.bind(events) -exports.removeListener = events.removeListener.bind(events) - -/** - * @description - * Crawl the given site for media. - * - * @param {InternalDatArchive} archive - site to crawl. - * @param {CrawlSourceRecord} crawlSource - internal metadata about the crawl target. - * @returns {Promise} - */ -exports.crawlSite = async function (archive, crawlSource) { - return doCrawl(archive, crawlSource, 'crawl_media', TABLE_VERSION, async ({changes, resetRequired}) => { - const supressEvents = resetRequired === true // dont emit when replaying old info - logger.silly('Crawling media', {details: {url: archive.url, numChanges: changes.length, resetRequired}}) - if (resetRequired) { - // reset all data - logger.debug('Resetting dataset', {details: {url: archive.url}}) - await db.run(` - DELETE FROM crawl_media WHERE crawlSourceId = ? - `, [crawlSource.id]) - await doCheckpoint('crawl_media', TABLE_VERSION, crawlSource, 0) - } - - // collect changed media - var changedMedia = getMatchingChangesInOrder(changes, JSON_PATH_REGEX) - if (changedMedia.length) { - logger.verbose('Collected new/changed media files', {details: {url: archive.url, changedMedia: changedMedia.map(p => p.name)}}) - } else { - logger.debug('No new media-files found', {details: {url: archive.url}}) - } - emitProgressEvent(archive.url, 'crawl_media', 0, changedMedia.length) - - // read and apply each media in order - var progress = 0 - for (let changedMediaItem of changedMedia) { - // TODO Currently the crawler will abort reading the feed if any media fails to load - // this means that a single unreachable file can stop the forward progress of media indexing - // to solve this, we need to find a way to tolerate unreachable media-files without losing our ability to efficiently detect new media - // -prf - if (changedMediaItem.type === 'del') { - // delete - await db.run(` - DELETE FROM crawl_media WHERE crawlSourceId = ? AND pathname = ? 
- `, [crawlSource.id, changedMediaItem.name]) - events.emit('media-removed', archive.url) - } else { - // read - let mediaString - try { - mediaString = await archive.pda.readFile(changedMediaItem.name, 'utf8') - } catch (err) { - logger.warn('Failed to read media file, aborting', {details: {url: archive.url, name: changedMediaItem.name, err}}) - return // abort indexing - } - - // parse and validate - let media - try { - media = JSON.parse(mediaString) - let valid = validateMedia(media) - if (!valid) throw ajv.errorsText(validateMedia.errors) - } catch (err) { - logger.warn('Failed to parse media file, skipping', {details: {url: archive.url, name: changedMediaItem.name, err}}) - continue // skip - } - - // massage the media - media.subtype = normalizeSchemaUrl(media.subtype) - media.createdAt = Number(new Date(media.createdAt)) - media.updatedAt = Number(new Date(media.updatedAt)) - if (!media.description) media.description = '' // optional - if (!media.tags) media.tags = [] // optional - if (isNaN(media.updatedAt)) media.updatedAt = 0 // optional - - // upsert - let mediaId = 0 - let existingMedia = await db.get(knex('crawl_media') - .select('id') - .where({ - crawlSourceId: crawlSource.id, - pathname: changedMediaItem.name - }) - ) - if (existingMedia) { - await db.run(knex('crawl_media') - .where({ - crawlSourceId: crawlSource.id, - pathname: changedMediaItem.name - }).update({ - crawledAt: Date.now(), - subtype: media.subtype, - href: media.href, - title: media.title, - description: media.description, - createdAt: media.createdAt, - updatedAt: media.updatedAt, - }) - ) - mediaId = existingMedia.id - events.emit('media-updated', archive.url) - } else { - let res = await db.run(knex('crawl_media') - .insert({ - crawlSourceId: crawlSource.id, - pathname: changedMediaItem.name, - crawledAt: Date.now(), - subtype: media.subtype, - href: media.href, - title: media.title, - description: media.description, - createdAt: media.createdAt, - updatedAt: media.updatedAt, - }) - ) - mediaId = +res.lastID - events.emit('media-added', archive.url) - } - await db.run(`DELETE FROM crawl_media_tags WHERE crawlMediaId = ?`, [mediaId]) - for (let tag of media.tags) { - await db.run(`INSERT OR IGNORE INTO crawl_tags (tag) VALUES (?)`, [tag]) - let tagRow = await db.get(`SELECT id FROM crawl_tags WHERE tag = ?`, [tag]) - await db.run(`INSERT INTO crawl_media_tags (crawlMediaId, crawlTagId) VALUES (?, ?)`, [mediaId, tagRow.id]) - } - } - - // checkpoint our progress - await doCheckpoint('crawl_media', TABLE_VERSION, crawlSource, changedMediaItem.version) - emitProgressEvent(archive.url, 'crawl_media', ++progress, changedMedia.length) - } - logger.silly(`Finished crawling media`, {details: {url: archive.url}}) - }) -} - -/** - * @description - * List crawled media. 
- * - * @param {Object} [opts] - * @param {Object} [opts.filters] - * @param {string|string[]} [opts.filters.authors] - * @param {string|string[]} [opts.filters.hrefs] - * @param {string|string[]} [opts.filters.subtypes] - * @param {string|string[]} [opts.filters.tags] - * @param {string} [opts.filters.visibility] - * @param {string} [opts.sortBy] - * @param {number} [opts.offset=0] - * @param {number} [opts.limit] - * @param {boolean} [opts.reverse] - * @returns {Promise>} - */ -exports.list = async function (opts) { - // TODO: handle visibility - // TODO: sortBy options - - // validate & parse params - if (opts && 'sortBy' in opts) assert(typeof opts.sortBy === 'number', 'SortBy must be a string') - if (opts && 'offset' in opts) assert(typeof opts.offset === 'number', 'Offset must be a number') - if (opts && 'limit' in opts) assert(typeof opts.limit === 'number', 'Limit must be a number') - if (opts && 'reverse' in opts) assert(typeof opts.reverse === 'boolean', 'Reverse must be a boolean') - if (opts && opts.filters) { - if ('authors' in opts.filters) { - if (Array.isArray(opts.filters.authors)) { - assert(opts.filters.authors.every(v => typeof v === 'string'), 'Authors filter must be a string or array of strings') - } else { - assert(typeof opts.filters.authors === 'string', 'Authors filter must be a string or array of strings') - opts.filters.authors = [opts.filters.authors] - } - opts.filters.authors = opts.filters.authors.map(url => toOrigin(url, true)) - } - if ('hrefs' in opts.filters) { - if (Array.isArray(opts.filters.hrefs)) { - assert(opts.filters.hrefs.every(v => typeof v === 'string'), 'Hrefs filter must be a string or array of strings') - } else { - assert(typeof opts.filters.hrefs === 'string', 'Hrefs filter must be a string or array of strings') - opts.filters.hrefs = [opts.filters.hrefs] - } - } - if ('subtypes' in opts.filters) { - if (Array.isArray(opts.filters.subtypes)) { - assert(opts.filters.subtypes.every(v => typeof v === 'string'), 'Subtypes filter must be a string or array of strings') - } else { - assert(typeof opts.filters.subtypes === 'string', 'Subtypes filter must be a string or array of strings') - opts.filters.subtypes = [opts.filters.subtypes] - } - opts.filters.subtypes = opts.filters.subtypes.map(normalizeSchemaUrl) - } - if ('tags' in opts.filters) { - if (Array.isArray(opts.filters.tags)) { - assert(opts.filters.tags.every(v => typeof v === 'string'), 'Tags filter must be a string or array of strings') - } else { - assert(typeof opts.filters.tags === 'string', 'Tags filter must be a string or array of strings') - opts.filters.tags = [opts.filters.tags] - } - } - } - - // build query - var sql = knex('crawl_media') - .select('crawl_media.*') - .select('crawl_sources.url as crawlSourceUrl') - .select(knex.raw('group_concat(crawl_tags.tag, ",") as tags')) - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_media.crawlSourceId') - .leftJoin('crawl_media_tags', 'crawl_media_tags.crawlMediaId', '=', 'crawl_media.id') - .leftJoin('crawl_tags', 'crawl_media_tags.crawlTagId', '=', 'crawl_tags.id') - .groupBy('crawl_media.id') - .orderBy('crawl_media.createdAt', opts.reverse ? 
'DESC' : 'ASC') - if (opts && opts.filters && opts.filters.authors) { - sql = sql.whereIn('crawl_sources.url', opts.filters.authors) - } - if (opts && opts.filters && opts.filters.hrefs) { - sql = sql.whereIn('crawl_media.href', opts.filters.hrefs) - } - if (opts && opts.filters && opts.filters.subtypes) { - sql = sql.whereIn('crawl_media.subtype', opts.filters.subtypes) - } - if (opts && opts.limit) sql = sql.limit(opts.limit) - if (opts && opts.offset) sql = sql.offset(opts.offset) - - // execute query - var rows = await db.all(sql) - var media = await Promise.all(rows.map(massageMediaRow)) - - // apply tags filter - if (opts && opts.filters && opts.filters.tags) { - const someFn = t => opts.filters.tags.includes(t) - media = media.filter(m => m.tags.some(someFn)) - } - - return media -} - -/** - * @description - * Get crawled media. - * - * @param {string} url - The URL of the media - * @returns {Promise} - */ -const get = exports.get = async function (url) { - // validate & parse params - var urlParsed - if (url) { - try { urlParsed = new URL(url) } - catch (e) { throw new Error('Invalid URL: ' + url) } - } - - // build query - var sql = knex('crawl_media') - .select('crawl_media.*') - .select('crawl_sources.url as crawlSourceUrl') - .select(knex.raw('group_concat(crawl_tags.tag, ",") as tags')) - .innerJoin('crawl_sources', function () { - this.on('crawl_sources.id', '=', 'crawl_media.crawlSourceId') - .andOn('crawl_sources.url', '=', knex.raw('?', `${urlParsed.protocol}//${urlParsed.hostname}`)) - }) - .leftJoin('crawl_media_tags', 'crawl_media_tags.crawlMediaId', '=', 'crawl_media.id') - .leftJoin('crawl_tags', 'crawl_tags.id', '=', 'crawl_media_tags.crawlTagId') - .where('crawl_media.pathname', urlParsed.pathname) - .groupBy('crawl_media.id') - - // execute query - return await massageMediaRow(await db.get(sql)) -} - -/** - * @description - * Create a new media. - * - * @param {InternalDatArchive} archive - where to write the media to. - * @param {Object} media - * @param {string} media.subtype - * @param {string} media.href - * @param {string} media.title - * @param {string} media.description - * @param {string[]} media.tags - * @param {string} media.visibility - * @returns {Promise} url - */ -exports.add = async function (archive, media) { - // TODO visibility - - var mediaObject = { - type: JSON_TYPE, - subtype: normalizeSchemaUrl(media.subtype), - href: media.href, - title: media.title, - description: media.description, - tags: media.tags, - createdAt: (new Date()).toISOString() - } - var valid = validateMedia(mediaObject) - if (!valid) throw ajv.errorsText(validateMedia.errors) - - var filename = generateTimeFilename() - var filepath = `/data/media/${filename}.json` - await ensureDirectory(archive, '/data') - await ensureDirectory(archive, '/data/media') - await archive.pda.writeFile(filepath, JSON.stringify(mediaObject, null, 2)) - await crawler.crawlSite(archive) - return archive.url + filepath -} - -/** - * @description - * Update the content of an existing media. - * - * @param {InternalDatArchive} archive - where to write the media to. - * @param {string} pathname - the pathname of the media. 
- * @param {Object} media - * @param {string} [media.subtype] - * @param {string} [media.href] - * @param {string} [media.title] - * @param {string} [media.description] - * @param {string[]} [media.tags] - * @param {string} [media.visibility] - * @returns {Promise} - */ -exports.edit = async function (archive, pathname, media) { - // TODO visibility - - var release = await lock('crawler:media:' + archive.url) - try { - // fetch media - var existingMedia = await get(archive.url + pathname) - if (!existingMedia) throw new Error('Media not found') - - // update media content - var mediaObject = { - type: JSON_TYPE, - subtype: normalizeSchemaUrl(('subtype' in media) ? media.subtype : existingMedia.subtype), - href: ('href' in media) ? media.href : existingMedia.href, - title: ('title' in media) ? media.title : existingMedia.title, - description: ('description' in media) ? media.description : existingMedia.description, - tags: ('tags' in media) ? media.tags : existingMedia.tags, - createdAt: existingMedia.createdAt, - updatedAt: (new Date()).toISOString() - } - - // validate - var valid = validateMedia(mediaObject) - if (!valid) throw ajv.errorsText(validateMedia.errors) - - // write - await archive.pda.writeFile(pathname, JSON.stringify(mediaObject, null, 2)) - await crawler.crawlSite(archive) - } finally { - release() - } -} - -/** - * @description - * Delete an existing media - * - * @param {InternalDatArchive} archive - where to write the media to. - * @param {string} pathname - the pathname of the media. - * @returns {Promise} - */ -exports.remove = async function (archive, pathname) { - assert(typeof pathname === 'string', 'Remove() must be provided a valid URL string') - await archive.pda.unlink(pathname) - await crawler.crawlSite(archive) -} - -// internal methods -// = - -/** - * @param {Object} row - * @returns {Promise} - */ -async function massageMediaRow (row) { - if (!row) return null - var author = await siteDescriptions.getBest({subject: row.crawlSourceUrl}) - if (!author) { - author = { - url: row.crawlSourceUrl, - title: '', - description: '', - type: [], - thumbUrl: `${row.crawlSourceUrl}/thumb`, - descAuthor: {url: null} - } - } - return { - pathname: row.pathname, - author, - subtype: row.subtype, - href: row.href, - title: row.title, - description: row.description, - tags: row.tags ? row.tags.split(',').filter(Boolean) : [], - createdAt: new Date(row.createdAt).toISOString(), - updatedAt: row.updatedAt ? 
new Date(row.updatedAt).toISOString() : null, - visibility: 'public' // TODO visibility - } -} diff --git a/crawler/posts.js b/crawler/posts.js deleted file mode 100644 index d7a93f59..00000000 --- a/crawler/posts.js +++ /dev/null @@ -1,357 +0,0 @@ -const assert = require('assert') -const {URL} = require('url') -const Events = require('events') -const Ajv = require('ajv') -const logger = require('../logger').child({category: 'crawler', dataset: 'posts'}) -const db = require('../dbs/profile-data-db') -const crawler = require('./index') -const datLibrary = require('../dat/library') -const lock = require('../lib/lock') -const knex = require('../lib/knex') -const siteDescriptions = require('./site-descriptions') -const {doCrawl, doCheckpoint, emitProgressEvent, getMatchingChangesInOrder, generateTimeFilename, ensureDirectory} = require('./util') -const postSchema = require('./json-schemas/post') - -// constants -// = - -const TABLE_VERSION = 2 -const JSON_TYPE = 'unwalled.garden/post' -const JSON_PATH_REGEX = /^\/data\/posts\/([^/]+)\.json$/i - -// typedefs -// = - -/** - * @typedef {import('../dat/library').InternalDatArchive} InternalDatArchive - * @typedef {import('./util').CrawlSourceRecord} CrawlSourceRecord - * @typedef { import("./site-descriptions").SiteDescription } SiteDescription - * - * @typedef {Object} Post - * @prop {string} pathname - * @prop {string} body - * @prop {string} createdAt - * @prop {string} updatedAt - * @prop {SiteDescription} author - * @prop {string} visibility - */ - -// globals -// = - -const events = new Events() -const ajv = (new Ajv()) -const validatePost = ajv.compile(postSchema) - -// exported api -// = - -exports.on = events.on.bind(events) -exports.addListener = events.addListener.bind(events) -exports.removeListener = events.removeListener.bind(events) - -/** - * @description - * Crawl the given site for posts. - * - * @param {InternalDatArchive} archive - site to crawl. - * @param {CrawlSourceRecord} crawlSource - internal metadata about the crawl target. - * @returns {Promise} - */ -exports.crawlSite = async function (archive, crawlSource) { - return doCrawl(archive, crawlSource, 'crawl_posts', TABLE_VERSION, async ({changes, resetRequired}) => { - const supressEvents = resetRequired === true // dont emit when replaying old info - logger.silly('Crawling posts', {details: {url: archive.url, numChanges: changes.length, resetRequired}}) - if (resetRequired) { - // reset all data - logger.debug('Resetting dataset', {details: {url: archive.url}}) - await db.run(` - DELETE FROM crawl_posts WHERE crawlSourceId = ? 
- `, [crawlSource.id]) - await doCheckpoint('crawl_posts', TABLE_VERSION, crawlSource, 0) - } - - // collect changed posts - var changedPosts = getMatchingChangesInOrder(changes, JSON_PATH_REGEX) - if (changedPosts.length) { - logger.verbose('Collected new/changed post files', {details: {url: archive.url, changedPosts: changedPosts.map(p => p.name)}}) - } else { - logger.debug('No new post-files found', {details: {url: archive.url}}) - } - emitProgressEvent(archive.url, 'crawl_posts', 0, changedPosts.length) - - // read and apply each post in order - var progress = 0 - for (let changedPost of changedPosts) { - // TODO Currently the crawler will abort reading the feed if any post fails to load - // this means that a single unreachable file can stop the forward progress of post indexing - // to solve this, we need to find a way to tolerate unreachable post-files without losing our ability to efficiently detect new posts - // -prf - if (changedPost.type === 'del') { - // delete - await db.run(` - DELETE FROM crawl_posts WHERE crawlSourceId = ? AND pathname = ? - `, [crawlSource.id, changedPost.name]) - events.emit('post-removed', archive.url) - } else { - // read - let postString - try { - postString = await archive.pda.readFile(changedPost.name, 'utf8') - } catch (err) { - logger.warn('Failed to read post file, aborting', {details: {url: archive.url, name: changedPost.name, err}}) - return // abort indexing - } - - // parse and validate - let post - try { - post = JSON.parse(postString) - let valid = validatePost(post) - if (!valid) throw ajv.errorsText(validatePost.errors) - } catch (err) { - logger.warn('Failed to parse post file, skipping', {details: {url: archive.url, name: changedPost.name, err}}) - continue // skip - } - - // massage the post - post.createdAt = Number(new Date(post.createdAt)) - post.updatedAt = Number(new Date(post.updatedAt)) - if (isNaN(post.updatedAt)) post.updatedAt = 0 // optional - - // upsert - let existingPost = await get(joinPath(archive.url, changedPost.name)) - if (existingPost) { - await db.run(` - UPDATE crawl_posts - SET crawledAt = ?, body = ?, createdAt = ?, updatedAt = ? - WHERE crawlSourceId = ? AND pathname = ? - `, [Date.now(), post.body, post.createdAt, post.updatedAt, crawlSource.id, changedPost.name]) - events.emit('post-updated', archive.url) - } else { - await db.run(` - INSERT INTO crawl_posts (crawlSourceId, pathname, crawledAt, body, createdAt, updatedAt) - VALUES (?, ?, ?, ?, ?, ?) - `, [crawlSource.id, changedPost.name, Date.now(), post.body, post.createdAt, post.updatedAt]) - events.emit('post-added', archive.url) - } - } - - // checkpoint our progress - await doCheckpoint('crawl_posts', TABLE_VERSION, crawlSource, changedPost.version) - emitProgressEvent(archive.url, 'crawl_posts', ++progress, changedPosts.length) - } - logger.silly(`Finished crawling posts`, {details: {url: archive.url}}) - }) -} - -/** - * @description - * List crawled posts. 
- * - * @param {Object} [opts] - * @param {Object} [opts.filters] - * @param {string|string[]} [opts.filters.authors] - * @param {string} [opts.filters.visibility] - * @param {string} [opts.sortBy] - * @param {number} [opts.offset=0] - * @param {number} [opts.limit] - * @param {boolean} [opts.reverse] - * @returns {Promise>} - */ -exports.list = async function (opts) { - // TODO: handle visibility - // TODO: sortBy options - - // validate & parse params - if (opts && 'sortBy' in opts) assert(typeof opts.sortBy === 'string', 'SortBy must be a string') - if (opts && 'offset' in opts) assert(typeof opts.offset === 'number', 'Offset must be a number') - if (opts && 'limit' in opts) assert(typeof opts.limit === 'number', 'Limit must be a number') - if (opts && 'reverse' in opts) assert(typeof opts.reverse === 'boolean', 'Reverse must be a boolean') - if (opts && opts.filters) { - if ('authors' in opts.filters) { - if (Array.isArray(opts.filters.authors)) { - assert(opts.filters.authors.every(v => typeof v === 'string'), 'Authors filter must be a string or array of strings') - } else { - assert(typeof opts.filters.authors === 'string', 'Authors filter must be a string or array of strings') - opts.filters.authors = [opts.filters.authors] - } - opts.filters.authors = await Promise.all(opts.filters.authors.map(datLibrary.getPrimaryUrl)) - } - if ('visibility' in opts.filters) { - assert(typeof opts.filters.visibility === 'string', 'Visibility filter must be a string') - } - } - - // build query - var sql = knex('crawl_posts') - .select('crawl_posts.*') - .select('crawl_sources.url AS crawlSourceUrl') - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_posts.crawlSourceId') - .orderBy('crawl_posts.createdAt', opts.reverse ? 'DESC' : 'ASC') - if (opts && opts.filters && opts.filters.authors) { - sql = sql.whereIn('crawl_sources.url', opts.filters.authors) - } - if (opts && opts.limit) sql = sql.limit(opts.limit) - if (opts && opts.offset) sql = sql.offset(opts.offset) - - // execute query - var rows = await db.all(sql) - return Promise.all(rows.map(massagePostRow)) -} - -/** - * @description - * Get crawled post. - * - * @param {string} url - The URL of the post - * @returns {Promise} - */ -const get = exports.get = async function (url) { - // validate & parse params - var urlParsed - if (url) { - try { urlParsed = new URL(url) } - catch (e) { throw new Error('Invalid URL: ' + url) } - } - - // execute query - var sql = knex('crawl_posts') - .select('crawl_posts.*') - .select('crawl_sources.url AS crawlSourceUrl') - .innerJoin('crawl_sources', function () { - this.on('crawl_sources.id', '=', 'crawl_posts.crawlSourceId') - .andOn('crawl_sources.url', '=', knex.raw('?', `${urlParsed.protocol}//${urlParsed.hostname}`)) - }) - .where('crawl_posts.pathname', urlParsed.pathname) - return await massagePostRow(await db.get(sql)) -} - -/** - * @description - * Create a new post. - * - * @param {InternalDatArchive} archive - where to write the post to. 
- * @param {Object} post - * @param {string} post.body - * @param {string} post.visibility - * @returns {Promise} url - */ -exports.add = async function (archive, post) { - // TODO visibility - - var postObject = { - type: JSON_TYPE, - body: post.body, - createdAt: (new Date()).toISOString() - } - var valid = validatePost(postObject) - if (!valid) throw ajv.errorsText(validatePost.errors) - - var filename = generateTimeFilename() - var filepath = `/data/posts/${filename}.json` - await ensureDirectory(archive, '/data') - await ensureDirectory(archive, '/data/posts') - await archive.pda.writeFile(filepath, JSON.stringify(postObject, null, 2)) - await crawler.crawlSite(archive) - return archive.url + filepath -} - -/** - * @description - * Update the content of an existing post. - * - * @param {InternalDatArchive} archive - where to write the post to. - * @param {string} pathname - the pathname of the post. - * @param {Object} post - * @param {string} [post.body] - * @param {string} [post.visibility] - * @returns {Promise} - */ -exports.edit = async function (archive, pathname, post) { - // TODO visibility - - var release = await lock('crawler:posts:' + archive.url) - try { - // fetch post - var existingPost = await get(archive.url + pathname) - if (!existingPost) throw new Error('Post not found') - - // update post content - var postObject = { - type: JSON_TYPE, - body: ('body' in post) ? post.body : existingPost.body, - createdAt: existingPost.createdAt, - updatedAt: (new Date()).toISOString() - } - - // validate - var valid = validatePost(postObject) - if (!valid) throw ajv.errorsText(validatePost.errors) - - // write - await archive.pda.writeFile(pathname, JSON.stringify(postObject, null, 2)) - await crawler.crawlSite(archive) - } finally { - release() - } -} - -/** - * @description - * Delete an existing post - * - * @param {InternalDatArchive} archive - where to write the post to. - * @param {string} pathname - the pathname of the post. - * @returns {Promise} - */ -exports.remove = async function (archive, pathname) { - assert(typeof pathname === 'string', 'Remove() must be provided a valid URL string') - await archive.pda.unlink(pathname) - await crawler.crawlSite(archive) -} - -// internal methods -// = - -/** - * @param {string} origin - * @param {string} pathname - * @returns {string} - */ -function joinPath (origin, pathname) { - if (origin.endsWith('/') && pathname.startsWith('/')) { - return origin + pathname.slice(1) - } - if (!origin.endsWith('/') && !pathname.startsWith('/')) { - return origin + '/' + pathname - } - return origin + pathname -} - -/** - * @param {Object} row - * @returns {Promise} - */ -async function massagePostRow (row) { - if (!row) return null - var author = await siteDescriptions.getBest({subject: row.crawlSourceUrl}) - if (!author) { - author = { - url: row.crawlSourceUrl, - title: '', - description: '', - type: [], - thumbUrl: `${row.crawlSourceUrl}/thumb`, - descAuthor: {url: null} - } - } - return { - pathname: row.pathname, - author, - body: row.body, - createdAt: new Date(row.createdAt).toISOString(), - updatedAt: row.updatedAt ? 
new Date(row.updatedAt).toISOString() : null, - visibility: 'public' // TODO visibility - } -} diff --git a/crawler/search.js b/crawler/search.js deleted file mode 100644 index a8d7e0c7..00000000 --- a/crawler/search.js +++ /dev/null @@ -1,468 +0,0 @@ -const _groupBy = require('lodash.groupby') -const _uniqWith = require('lodash.uniqwith') -const db = require('../dbs/profile-data-db') -const bookmarksDb = require('../dbs/bookmarks') -const historyDb = require('../dbs/history') -const datLibrary = require('../dat/library') -const follows = require('./follows') -const siteDescriptions = require('./site-descriptions') -const {getSiteDescriptionThumbnailUrl} = require('./util') -const knex = require('../lib/knex') -const users = require('../users') - -const KNOWN_SITE_TYPES = [ - 'unwalled.garden/person', - 'unwalled.garden/theme' -] - -// typedefs -// = - -/** - * @typedef {import("./site-descriptions").SiteDescription} SiteDescription - * @typedef {import("../dbs/archives").LibraryArchiveRecord} LibraryArchiveRecord - * - * @typedef {Object} SuggestionResults - * @prop {Array} bookmarks - * @prop {Array} websites - * @prop {Array} people - * @prop {Array} themes - * @prop {(undefined|Array)} history - * - * TODO: define the SuggestionResults values - * - * @typedef {Object} SearchResults - * @prop {number} highlightNonce - A number used to create perimeters around text that should be highlighted. - * @prop {Array} results - * - * @typedef {Object} SearchResultAuthor - * @prop {string} url - * @prop {string} title - * @prop {string} description - * @prop {Array} type - * - * @typedef {Object} SearchResultRecord - * @prop {string} type - * @prop {string} url - * @prop {number} crawledAt - * @prop {SearchResultAuthor} author - * - * @typedef {Object} SiteSearchResult - * @prop {SearchResultRecord} record - * @prop {string} url - * @prop {string} title - * @prop {string} description - * @prop {Array} type - * - * @typedef {Object} PostSearchResult - * @prop {SearchResultRecord} record - * @prop {string} url - * @prop {Object} content - * @prop {string} content.body - * @prop {number} createdAt - * @prop {number} updatedAt - * - * @typedef {Object} BookmarkSearchResult - * @prop {SearchResultRecord} record - * @prop {string} url - * @prop {Object} content - * @prop {string} content.href - * @prop {string} content.title - * @prop {string} content.description - * @prop {number} createdAt - * @prop {number} updatedAt - */ - -// exported api -// = - -/** - * @description - * Get suggested content of various types. - * - * @param {string} user - The current user's URL. - * @param {string} [query=''] - The search query. - * @param {Object} [opts={}] - * @param {boolean} [opts.filterPins] - If true, will filter out pinned bookmarks. - * @returns {Promise} - */ -exports.listSuggestions = async function (user, query = '', opts = {}) { - var suggestions = { - bookmarks: [], - websites: [], - people: [], - themes: [], - history: undefined - } - const filterFn = a => query ? 
((a.url || a.href).includes(query) || a.title.toLowerCase().includes(query)) : true - const sortFn = (a, b) => (a.title||'').localeCompare(b.title||'') - function dedup (arr) { - var hits = new Set() - return arr.filter(item => { - if (hits.has(item.url)) return false - hits.add(item.url) - return true - }) - } - - var userId = (await users.get(user)).id - - // bookmarks - var bookmarkResults = await bookmarksDb.listBookmarks(0) - if (opts.filterPins) { - bookmarkResults = bookmarkResults.filter(b => !b.pinned && filterFn(b)) - } else { - bookmarkResults = bookmarkResults.filter(filterFn) - } - bookmarkResults.sort(sortFn) - bookmarkResults = bookmarkResults.slice(0, 12) - suggestions.bookmarks = bookmarkResults.map(b => ({title: b.title, url: b.href})) - - // websites - suggestions.websites = /** @type LibraryArchiveRecord[] */(await datLibrary.queryArchives({isSaved: true})) - suggestions.websites = suggestions.websites.filter(w => ( - w.url !== user // filter out the user's site - && (!w.type || !w.type.find(t => KNOWN_SITE_TYPES.includes(t))) // filter out other site types - )) - suggestions.websites = suggestions.websites.filter(filterFn) - suggestions.websites.sort(sortFn) - - // people - suggestions.people = (await follows.list({filters: {authors: user}})).map(({topic}) => topic) - suggestions.people = (await datLibrary.queryArchives({isSaved: true, type: 'unwalled.garden/person'})).concat(suggestions.people) - suggestions.people = dedup(suggestions.people) - suggestions.people = suggestions.people.filter(filterFn) - suggestions.people.sort(sortFn) - - // themes - suggestions.themes = /** @type LibraryArchiveRecord[] */(await datLibrary.queryArchives({isSaved: true, type: 'unwalled.garden/theme'})) - suggestions.themes = suggestions.themes.filter(filterFn) - suggestions.themes.sort(sortFn) - - if (query) { - // history - var historyResults = await historyDb.search(query) - suggestions.history = historyResults.slice(0, 12) - suggestions.history.sort((a, b) => a.url.length - b.url.length) // shorter urls at top - } - - return suggestions -} - -/** - * @description - * Run a search query against crawled data. - * - * @param {string} user - The current user's URL. - * @param {Object} opts - * @param {string} [opts.query] - The search query. - * @param {Object} [opts.filters] - * @param {string|string[]} [opts.filters.datasets] - Filter results to the given datasets. Defaults to 'all'. Valid values: 'all', 'sites', 'unwalled.garden/post', 'unwalled.garden/bookmark'. - * @param {number} [opts.filters.since] - Filter results to items created since the given timestamp. - * @param {number} [opts.hops=1] - How many hops out in the user's follow graph should be included? Valid values: 1, 2. - * @param {number} [opts.offset] - * @param {number} [opts.limit = 20] - * @returns {Promise} - */ -exports.query = async function (user, opts) { - const highlightNonce = (Math.random() * 1e3)|0 - const startHighlight = `{${highlightNonce}}` - const endHighlight = `{/${highlightNonce}}` - - var searchResults = { - highlightNonce, - results: [] - } - var {query, hops, filters, offset, limit} = Object.assign({}, { - query: undefined, - hops: 1, - filters: {}, - offset: 0, - limit: 20 - }, opts) - var {datasets, since} = Object.assign({}, { - datasets: 'all', - since: 0 - }, filters) - hops = Math.min(Math.max(Math.floor(hops), 1), 2) // clamp to [1, 2] for now - var datasetValues = (typeof datasets === 'undefined') - ? ['all'] - : Array.isArray(datasets) ? 
datasets : [datasets] - - // prep search terms - if (query && typeof query === 'string') { - query = query - .replace(/[^a-z0-9]/ig, ' ') // strip symbols that sqlite interprets. - .toLowerCase() // all lowercase. (uppercase is interpretted as a directive by sqlite.) - query += '*' // match prefixes - } - - // get user's crawl_source id - var userCrawlSourceId - { - let res = await db.get(`SELECT id FROM crawl_sources WHERE url = ?`, [user]) - userCrawlSourceId = res.id - } - - // construct set of crawl sources to query - var crawlSourceIds - if (hops === 2) { - // the user and all followed sources - let res = await db.all(` - SELECT id FROM crawl_sources src - INNER JOIN crawl_follows follows ON follows.destUrl = src.url AND follows.crawlSourceId = ? - `, [userCrawlSourceId]) - crawlSourceIds = [userCrawlSourceId].concat(res.map(({id}) => id)) - } else if (hops === 1) { - // just the user - crawlSourceIds = [userCrawlSourceId] - } - - // run queries - if (datasetValues.includes('all') || datasetValues.includes('sites')) { - // SITES - let rows = await db.all(buildSitesSearchQuery({ - query, - crawlSourceIds, - user, - userCrawlSourceId, - since, - limit, - offset, - startHighlight, - endHighlight - })) - rows = _uniqWith(rows, (a, b) => a.url === b.url) // remove duplicates - rows = await Promise.all(rows.map(massageSiteSearchResult)) - searchResults.results = searchResults.results.concat(rows) - } - if (datasetValues.includes('all') || datasets.includes('unwalled.garden/post')) { - // POSTS - let rows = await db.all(buildPostsSearchQuery({ - query, - crawlSourceIds, - userCrawlSourceId, - since, - limit, - offset, - startHighlight, - endHighlight - })) - rows = await Promise.all(rows.map(massagePostSearchResult)) - searchResults.results = searchResults.results.concat(rows) - } - if (datasetValues.includes('all') || datasets.includes('unwalled.garden/bookmark')) { - // BOOKMARKS - let rows = await db.all(buildBookmarksSearchQuery({ - query, - crawlSourceIds, - userCrawlSourceId, - since, - limit, - offset, - startHighlight, - endHighlight - })) - rows = await Promise.all(rows.map(massageBookmarkSearchResult)) - searchResults.results = searchResults.results.concat(rows) - } - - // sort and apply limit again - searchResults.results.sort((a, b) => b.record.crawledAt - a.record.crawledAt) - searchResults.results = searchResults.results.slice(0, limit) - - return searchResults -} - -// internal methods -// = - -function buildSitesSearchQuery ({query, crawlSourceIds, user, userCrawlSourceId, since, limit, offset, startHighlight, endHighlight}) { - let sql = knex(query ? 'crawl_site_descriptions_fts_index' : 'crawl_site_descriptions') - .select('crawl_site_descriptions.url AS url') - .select('crawl_sources.url AS authorUrl') - .select('crawl_site_descriptions.crawledAt') - .where(builder => builder - .whereIn('crawl_follows.crawlSourceId', crawlSourceIds) // description by a followed user - .orWhere(builder => builder - .where('crawl_site_descriptions.url', user) // about me and... 
- .andWhere('crawl_site_descriptions.crawlSourceId', userCrawlSourceId) // by me - ) - ) - .where('crawl_site_descriptions.crawledAt', '>=', since) - .orderBy('crawl_site_descriptions.crawledAt') - .limit(limit) - .offset(offset) - if (query) { - sql = sql - .select(knex.raw(`SNIPPET(crawl_site_descriptions_fts_index, 0, '${startHighlight}', '${endHighlight}', '...', 25) AS title`)) - .select(knex.raw(`SNIPPET(crawl_site_descriptions_fts_index, 1, '${startHighlight}', '${endHighlight}', '...', 25) AS description`)) - .innerJoin('crawl_site_descriptions', 'crawl_site_descriptions.rowid', '=', 'crawl_site_descriptions_fts_index.rowid') - .leftJoin('crawl_follows', 'crawl_follows.destUrl', '=', 'crawl_site_descriptions.url') - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_site_descriptions.crawlSourceId') - .whereRaw('crawl_site_descriptions_fts_index MATCH ?', [query]) - } else { - sql = sql - .select('crawl_site_descriptions.title') - .select('crawl_site_descriptions.description') - .leftJoin('crawl_follows', 'crawl_follows.destUrl', '=', 'crawl_site_descriptions.url') - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_site_descriptions.crawlSourceId') - } - return sql -} - -function buildPostsSearchQuery ({query, crawlSourceIds, userCrawlSourceId, since, limit, offset, startHighlight, endHighlight}) { - let sql = knex(query ? 'crawl_posts_fts_index' : 'crawl_posts') - .select('crawl_posts.pathname') - .select('crawl_posts.crawledAt') - .select('crawl_posts.createdAt') - .select('crawl_posts.updatedAt') - .select('crawl_sources.url AS authorUrl') - .where(builder => builder - .whereIn('crawl_follows.crawlSourceId', crawlSourceIds) // published by someone I follow - .orWhere('crawl_posts.crawlSourceId', userCrawlSourceId) // or by me - ) - .andWhere('crawl_posts.crawledAt', '>=', since) - .orderBy('crawl_posts.crawledAt') - .limit(limit) - .offset(offset) - if (query) { - sql = sql - .select(knex.raw(`SNIPPET(crawl_posts_fts_index, 0, '${startHighlight}', '${endHighlight}', '...', 25) AS body`)) - .innerJoin('crawl_posts', 'crawl_posts.rowid', '=', 'crawl_posts_fts_index.rowid') - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_posts.crawlSourceId') - .leftJoin('crawl_follows', 'crawl_follows.destUrl', '=', 'crawl_sources.url') - .whereRaw('crawl_posts_fts_index MATCH ?', [query]) - } else { - sql = sql - .select('crawl_posts.body') - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_posts.crawlSourceId') - .leftJoin('crawl_follows', 'crawl_follows.destUrl', '=', 'crawl_sources.url') - } - return sql -} - -function buildBookmarksSearchQuery ({query, crawlSourceIds, userCrawlSourceId, since, limit, offset, startHighlight, endHighlight}) { - let sql = knex(query ? 
'crawl_bookmarks_fts_index' : 'crawl_bookmarks') - .select('crawl_bookmarks.pathname') - .select('crawl_bookmarks.crawledAt') - .select('crawl_bookmarks.createdAt') - .select('crawl_bookmarks.updatedAt') - .select('crawl_sources.url AS authorUrl') - .where(builder => builder - .whereIn('crawl_follows.crawlSourceId', crawlSourceIds) // published by someone I follow - .orWhere('crawl_bookmarks.crawlSourceId', userCrawlSourceId) // or by me - ) - .andWhere('crawl_bookmarks.crawledAt', '>=', since) - .orderBy('crawl_bookmarks.crawledAt') - .limit(limit) - .offset(offset) - if (query) { - sql = sql - .select('crawl_bookmarks.href') - .select(knex.raw(`SNIPPET(crawl_bookmarks_fts_index, 0, '${startHighlight}', '${endHighlight}', '...', 25) AS title`)) - .select(knex.raw(`SNIPPET(crawl_bookmarks_fts_index, 1, '${startHighlight}', '${endHighlight}', '...', 25) AS description`)) - .innerJoin('crawl_bookmarks', 'crawl_bookmarks.rowid', '=', 'crawl_bookmarks_fts_index.rowid') - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_bookmarks.crawlSourceId') - .leftJoin('crawl_follows', 'crawl_follows.destUrl', '=', 'crawl_sources.url') - .whereRaw('crawl_bookmarks_fts_index MATCH ?', [query]) - } else { - sql = sql - .select('crawl_bookmarks.href') - .select('crawl_bookmarks.title') - .select('crawl_bookmarks.description') - .innerJoin('crawl_sources', 'crawl_sources.id', '=', 'crawl_bookmarks.crawlSourceId') - .leftJoin('crawl_follows', 'crawl_follows.destUrl', '=', 'crawl_sources.url') - } - return sql -} - -/** - * @param {Object} row - * @returns {Promise} - */ -async function massageSiteSearchResult (row) { - // fetch additional info - var author = await siteDescriptions.getBest({subject: row.authorUrl}) - - // massage attrs - return { - record: { - type: 'site', - url: row.url, - author: { - url: author.url, - title: author.title, - description: author.description, - type: author.type - }, - crawledAt: row.crawledAt, - }, - url: row.url, - title: row.title, - description: row.description, - type: row.type - } -} - -/** - * @param {Object} row - * @returns {Promise} - */ -async function massagePostSearchResult (row) { - // fetch additional info - var author = await siteDescriptions.getBest({subject: row.authorUrl}) - - // massage attrs - var url = row.authorUrl + row.pathname - return { - record: { - type: 'unwalled.garden/post', - url, - author: { - url: author.url, - title: author.title, - description: author.description, - type: author.type - }, - crawledAt: row.crawledAt, - }, - url, - content: {body: row.body}, - createdAt: row.createdAt, - updatedAt: row.updatedAt - } -} - -/** - * @param {Object} row - * @returns {Promise} - */ -async function massageBookmarkSearchResult (row) { - // fetch additional info - var author = await siteDescriptions.getBest({subject: row.authorUrl}) - - // massage attrs - var url = row.authorUrl + row.pathname - return { - record: { - type: 'unwalled.garden/bookmark', - url, - author: { - url: author.url, - title: author.title, - description: author.description, - type: author.type - }, - crawledAt: row.crawledAt, - }, - url, - content: { - href: row.href, - title: row.title, - description: row.description - }, - createdAt: row.createdAt, - updatedAt: row.updatedAt - } -} \ No newline at end of file diff --git a/crawler/site-descriptions.js b/crawler/site-descriptions.js deleted file mode 100644 index 6011815f..00000000 --- a/crawler/site-descriptions.js +++ /dev/null @@ -1,364 +0,0 @@ -const assert = require('assert') -const {URL} = require('url') -const 
Events = require('events') -const logger = require('../logger').child({category: 'crawler', dataset: 'site-descriptions'}) -const db = require('../dbs/profile-data-db') -const dat = require('../dat') -const crawler = require('./index') -const { - doCrawl, - doCheckpoint, - emitProgressEvent, - getMatchingChangesInOrder, - getSiteDescriptionThumbnailUrl, - toHostname -} = require('./util') - -// constants -// = - -const TABLE_VERSION = 1 -const JSON_PATH_REGEX = /^\/(dat\.json|data\/known-sites\/([^/]+)\/dat\.json)$/i - -// typedefs -// = - -/** - * @typedef {import('../dat/library').InternalDatArchive} InternalDatArchive - * @typedef {import('./util').CrawlSourceRecord} CrawlSourceRecord - * - * @typedef {Object} SiteDescription - * @prop {string} url - * @prop {string} title - * @prop {string} description - * @prop {Array} type - * @prop {string} thumbUrl - * @prop {Object} descAuthor - * @prop {string} descAuthor.url - * @prop {boolean} [followsUser] - does this site follow the specified user site? - * @prop {Array} [followedBy] - list of sites following this site. - */ - -// globals -// = - -var events = new Events() - -// exported api -// = - -exports.on = events.on.bind(events) -exports.addListener = events.addListener.bind(events) -exports.removeListener = events.removeListener.bind(events) - -/** - * @description - * Crawl the given site for site descriptions. - * - * @param {InternalDatArchive} archive - site to crawl. - * @param {CrawlSourceRecord} crawlSource - internal metadata about the crawl target. - * @returns {Promise} - */ -exports.crawlSite = async function (archive, crawlSource) { - return doCrawl(archive, crawlSource, 'crawl_site_descriptions', TABLE_VERSION, async ({changes, resetRequired}) => { - const supressEvents = resetRequired === true // dont emit when replaying old info - logger.silly('Crawling site descriptions', {details: {url: archive.url, numChanges: changes.length, resetRequired}}) - if (resetRequired) { - // reset all data - logger.debug('Resetting dataset', {details: {url: archive.url}}) - await db.run(` - DELETE FROM crawl_site_descriptions WHERE crawlSourceId = ? - `, [crawlSource.id]) - await doCheckpoint('crawl_site_descriptions', TABLE_VERSION, crawlSource, 0) - } - - // collect changed site descriptions - var changedSiteDescriptions = getMatchingChangesInOrder(changes, JSON_PATH_REGEX) - if (changedSiteDescriptions.length > 0) { - logger.verbose('Collected new/changed site-description files', {details: {url: archive.url, changedFiles: changedSiteDescriptions.map(p => p.name)}}) - } else { - logger.debug('No new site-description files found', {details: {url: archive.url}}) - } - emitProgressEvent(archive.url, 'crawl_site_descriptions', 0, changedSiteDescriptions.length) - - // read and apply each post in order - var progress = 0 - for (let changedSiteDescription of changedSiteDescriptions) { - // TODO Currently the crawler will abort reading the feed if any description fails to load - // this means that a single unreachable file can stop the forward progress of description indexing - // to solve this, we need to find a way to tolerate bad description-files without losing our ability to efficiently detect new posts - // -prf - - // determine the url - let url = getUrlFromDescriptionPath(archive, changedSiteDescription.name) - - if (changedSiteDescription.type === 'del') { - // delete - await db.run(` - DELETE FROM crawl_site_descriptions WHERE crawlSourceId = ? AND url = ? 
- `, [crawlSource.id, url]) - events.emit('description-removed', archive.url) - } else { - // read - let descString - try { - descString = await archive.pda.readFile(changedSiteDescription.name, 'utf8') - } catch (err) { - logger.warn('Failed to read dat.json file, aborting', {details: {url: archive.url, name: changedSiteDescription.name, err}}) - return // abort indexing - } - - // parse and validate - let desc - try { - desc = JSON.parse(descString) - assert(typeof desc === 'object', 'File be an object') - } catch (err) { - logger.warn('Failed to parse dat.json file, aborting', {details: {url: archive.url, name: changedSiteDescription.name, err}}) - continue // skip - } - - // massage the description - desc.title = typeof desc.title === 'string' ? desc.title : '' - desc.description = typeof desc.description === 'string' ? desc.description : '' - if (typeof desc.type === 'string') desc.type = desc.type.split(',') - if (Array.isArray(desc.type)) { - desc.type = desc.type.filter(isString) - } else { - desc.type = [] - } - - // replace - await db.run(` - DELETE FROM crawl_site_descriptions WHERE crawlSourceId = ? AND url = ? - `, [crawlSource.id, url]) - await db.run(` - INSERT INTO crawl_site_descriptions (crawlSourceId, crawledAt, url, title, description, type) - VALUES (?, ?, ?, ?, ?, ?) - `, [crawlSource.id, Date.now(), url, desc.title, desc.description, desc.type.join(',')]) - events.emit('description-added', archive.url) - } - - // checkpoint our progress - logger.silly(`Finished crawling site descriptions`, {details: {url: archive.url}}) - await doCheckpoint('crawl_site_descriptions', TABLE_VERSION, crawlSource, changedSiteDescription.version) - emitProgressEvent(archive.url, 'crawl_site_descriptions', ++progress, changedSiteDescription.length) - } - }) -} - -/** - * @description - * List crawled site descriptions. - * - * @param {Object} [opts] - * @param {string | Array} [opts.subject] - (URL) filter descriptions to those which describe this subject. - * @param {string | Array} [opts.author] - (URL) filter descriptions to those created by this author. - * @param {number} [opts.offset] - * @param {number} [opts.limit] - * @param {boolean} [opts.reverse] - * @returns {Promise>} - */ -const list = exports.list = async function ({offset, limit, reverse, author, subject} = {}) { - // validate & parse params - assert(!offset || typeof offset === 'number', 'Offset must be a number') - assert(!limit || typeof limit === 'number', 'Limit must be a number') - assert(!reverse || typeof reverse === 'boolean', 'Reverse must be a boolean') - assert(!author || typeof author === 'string' || (Array.isArray(author) && author.every(isString)), 'Author must be a string or an array of strings') - assert(!subject || typeof subject === 'string' || (Array.isArray(subject) && subject.every(isString)), 'Subject must be a string or an array of strings') - - if (author) { - author = Array.isArray(author) ? author : [author] - try { author = await Promise.all(author.map(dat.library.getPrimaryUrl)) } - catch (e) { throw new Error('Author must contain valid URLs') } - } - if (subject) { - subject = Array.isArray(subject) ? 
subject : [subject] - try { subject = await Promise.all(subject.map(dat.library.getPrimaryUrl)) } - catch (e) { throw new Error('Subject must contain valid URLs') } - } - - // build query - var query = ` - SELECT crawl_site_descriptions.*, src.url AS crawlSourceUrl FROM crawl_site_descriptions - INNER JOIN crawl_sources src ON src.id = crawl_site_descriptions.crawlSourceId - ` - var values = [] - - if (author || subject) { - query += ` WHERE ` - } - - if (author) { - query += `(` - let op = `` - for (let a of author) { - query += `${op} src.url = ?` - op = ` OR` - values.push(a) - } - query += `) ` - } - if (subject) { - if (author) { - query += ` AND ` - } - query += `(` - let op = `` - for (let s of subject) { - query += `${op} crawl_site_descriptions.url = ?` - op = ` OR` - values.push(s) - } - query += `) ` - } - if (reverse) { - query += ` DESC` - } - if (limit) { - query += ` LIMIT ?` - values.push(limit) - } - if (offset) { - query += ` OFFSET ?` - values.push(offset) - } - - // execute query - return (await db.all(query, values)).map(massageSiteDescriptionRow) -} - -/** - * @description - * Get the most trustworthy site description available. - * - * @param {Object} [opts] - * @param {string} [opts.subject] - (URL) filter descriptions to those which describe this subject. - * @param {string} [opts.author] - (URL) filter descriptions to those created by this author. - * @returns {Promise} - */ -exports.getBest = async function ({subject, author} = {}) { - // TODO choose based on trust - var descriptions = await list({subject, author}) - return descriptions[0] -} - -/** - * @description - * Capture a site description into the archive's known-sites cache. - * - * @param {InternalDatArchive} archive - where to write the capture to. - * @param {(InternalDatArchive|string)} subject - which archive to capture. - * @returns Promise - */ -exports.capture = async function (archive, subject) { - var subjectArchive - if (typeof subject === 'string') { - subjectArchive = await dat.library.getOrLoadArchive(subject) - } else { - subjectArchive = subject - } - - // create directory - var hostname = toHostname(subjectArchive.url) - await ensureDirectory(archive, '/data') - await ensureDirectory(archive, '/data/known-sites') - await ensureDirectory(archive, `/data/known-sites/${hostname}`) - - // capture dat.json - try { - var datJson = JSON.parse(await subjectArchive.pda.readFile('/dat.json')) - } catch (err) { - logger.warn('Failed to read dat.json of subject archive', {details: {err}}) - throw new Error('Unabled to read subject dat.json') - } - await archive.pda.writeFile(`/data/known-sites/${hostname}/dat.json`, JSON.stringify(datJson, null, 2)) - - // capture thumb - for (let ext of ['jpg', 'jpeg', 'png']) { - let thumbPath = `/thumb.${ext}` - if (await fileExists(subjectArchive, thumbPath)) { - let targetPath = `/data/known-sites/${hostname}/thumb.${ext}` - await archive.pda.writeFile(targetPath, await subjectArchive.pda.readFile(thumbPath, 'binary'), 'binary') - break - } - } -} - -/** - * @description - * Delete a captured site description in the given archive's known-sites cache. - * - * @param {InternalDatArchive} archive - where to remove the capture from. - * @param {(InternalDatArchive|string)} subject - which archive's capture to remove. 
- * @returns Promise - */ -exports.deleteCapture = async function (archive, subject) { - var subjectUrl - if (typeof subject === 'string') { - subjectUrl = subject - } else { - subjectUrl = subject.url - } - assert(typeof subjectUrl === 'string', 'Delete() must be provided a valid URL string') - var hostname = toHostname(subjectUrl) - await archive.pda.rmdir(`/data/known-sites/${hostname}`, {recursive: true}) - await crawler.crawlSite(archive) -} - -// internal methods -// = - -/** - * @param {any} v - * returns {boolean} - */ -function isString (v) { - return typeof v === 'string' -} - -/** - * @param {InternalDatArchive} archive - * @param {string} name - * @returns {string} - */ -function getUrlFromDescriptionPath (archive, name) { - if (name === '/dat.json') return archive.url - var parts = name.split('/') // '/data/known-sites/{hostname}/dat.json' -> ['', 'data', 'known-sites', hostname, 'dat.json'] - return 'dat://' + parts[3] -} - -/** - * @param {InternalDatArchive} archive - * @param {string} pathname - * @returns {Promise} - */ -async function ensureDirectory (archive, pathname) { - try { await archive.pda.mkdir(pathname) } - catch (e) { /* ignore */ } -} - -/** - * @param {InternalDatArchive} archive - * @param {string} pathname - * @returns {Promise} - */ -async function fileExists (archive, pathname) { - try { await archive.pda.stat(pathname) } - catch (e) { return false } - return true -} - -/** - * @param {Object} row - * @returns {SiteDescription} - */ -function massageSiteDescriptionRow (row) { - if (!row) return null - row.author = {url: row.crawlSourceUrl} - row.type = row.type && typeof row.type === 'string' ? row.type.split(',') : undefined - row.thumbUrl = getSiteDescriptionThumbnailUrl(row.author.url, row.url) - delete row.crawlSourceUrl - delete row.crawlSourceId - return row -} diff --git a/crawler/tags.js b/crawler/tags.js deleted file mode 100644 index 0aadb130..00000000 --- a/crawler/tags.js +++ /dev/null @@ -1,218 +0,0 @@ -const assert = require('assert') -const {URL} = require('url') -const db = require('../dbs/profile-data-db') -const knex = require('../lib/knex') -const datLibrary = require('../dat/library') -const {normalizeSchemaUrl} = require('./util') - -// typedefs -// = - -/** - * @typedef {import('../dat/library').InternalDatArchive} InternalDatArchive - * @typedef {import('./util').CrawlSourceRecord} CrawlSourceRecord - * @typedef { import("./site-descriptions").SiteDescription } SiteDescription - * - * @typedef {Object} Tag - * @prop {string} tag - * @prop {number} count - */ - -// exported api -// = - -/** - * @description - * List bookmark tags. 
- * - * @param {Object} [opts] - * @param {Object} [opts.filters] - * @param {string|string[]} [opts.filters.authors] - * @param {string} [opts.filters.visibility] - * @param {string} [opts.sortBy] - * @param {number} [opts.offset=0] - * @param {number} [opts.limit] - * @param {boolean} [opts.reverse] - * @returns {Promise>} - */ -exports.listBookmarkTags = async function (opts) { - // TODO: handle visibility - // TODO: sortBy options - - // validate & parse params - if (opts && 'sortBy' in opts) assert(typeof opts.sortBy === 'string', 'SortBy must be a string') - if (opts && 'offset' in opts) assert(typeof opts.offset === 'number', 'Offset must be a number') - if (opts && 'limit' in opts) assert(typeof opts.limit === 'number', 'Limit must be a number') - if (opts && 'reverse' in opts) assert(typeof opts.reverse === 'boolean', 'Reverse must be a boolean') - if (opts && opts.filters) { - if ('authors' in opts.filters) { - if (Array.isArray(opts.filters.authors)) { - assert(opts.filters.authors.every(v => typeof v === 'string'), 'Authors filter must be a string or array of strings') - } else { - assert(typeof opts.filters.authors === 'string', 'Authors filter must be a string or array of strings') - opts.filters.authors = [opts.filters.authors] - } - opts.filters.authors = await Promise.all(opts.filters.authors.map(datLibrary.getPrimaryUrl)) - } - if ('visibility' in opts.filters) { - assert(typeof opts.filters.visibility === 'string', 'Visibility filter must be a string') - } - } - - // build query - var sql = knex('crawl_tags') - .select('crawl_tags.tag') - .select(knex.raw('count(crawl_tags.id) as count')) - .innerJoin('crawl_bookmarks_tags', 'crawl_bookmarks_tags.crawlTagId', '=', 'crawl_tags.id') - .innerJoin('crawl_bookmarks', 'crawl_bookmarks_tags.crawlBookmarkId', '=', 'crawl_bookmarks.id') - .leftJoin('crawl_sources', 'crawl_bookmarks.crawlSourceId', '=', 'crawl_sources.id') - .orderBy('crawl_tags.tag', opts.reverse ? 'DESC' : 'ASC') - .groupBy('crawl_tags.tag') - if (opts && opts.filters && opts.filters.authors) { - sql = sql.whereIn('crawl_sources.url', opts.filters.authors) - } - if (opts && opts.limit) sql = sql.limit(opts.limit) - if (opts && opts.offset) sql = sql.offset(opts.offset) - - // execute query - var rows = await db.all(sql) - return rows.map(row => ({ - tag: row.tag, - count: +row.count - })) -} - -/** - * @description - * List discussion tags. 
- * - * @param {Object} [opts] - * @param {Object} [opts.filters] - * @param {string|string[]} [opts.filters.authors] - * @param {string} [opts.filters.visibility] - * @param {string} [opts.sortBy] - * @param {number} [opts.offset=0] - * @param {number} [opts.limit] - * @param {boolean} [opts.reverse] - * @returns {Promise>} - */ -exports.listDiscussionTags = async function (opts) { - // TODO: handle visibility - // TODO: sortBy options - - // validate & parse params - if (opts && 'sortBy' in opts) assert(typeof opts.sortBy === 'string', 'SortBy must be a string') - if (opts && 'offset' in opts) assert(typeof opts.offset === 'number', 'Offset must be a number') - if (opts && 'limit' in opts) assert(typeof opts.limit === 'number', 'Limit must be a number') - if (opts && 'reverse' in opts) assert(typeof opts.reverse === 'boolean', 'Reverse must be a boolean') - if (opts && opts.filters) { - if ('authors' in opts.filters) { - if (Array.isArray(opts.filters.authors)) { - assert(opts.filters.authors.every(v => typeof v === 'string'), 'Authors filter must be a string or array of strings') - } else { - assert(typeof opts.filters.authors === 'string', 'Authors filter must be a string or array of strings') - opts.filters.authors = [opts.filters.authors] - } - opts.filters.authors = await Promise.all(opts.filters.authors.map(datLibrary.getPrimaryUrl)) - } - if ('visibility' in opts.filters) { - assert(typeof opts.filters.visibility === 'string', 'Visibility filter must be a string') - } - } - - // build query - var sql = knex('crawl_tags') - .select('crawl_tags.tag') - .select(knex.raw('count(crawl_tags.id) as count')) - .innerJoin('crawl_discussions_tags', 'crawl_discussions_tags.crawlTagId', '=', 'crawl_tags.id') - .innerJoin('crawl_discussions', 'crawl_discussions_tags.crawlDiscussionId', '=', 'crawl_discussions.id') - .leftJoin('crawl_sources', 'crawl_discussions.crawlSourceId', '=', 'crawl_sources.id') - .orderBy('crawl_tags.tag', opts.reverse ? 'DESC' : 'ASC') - .groupBy('crawl_tags.tag') - if (opts && opts.filters && opts.filters.authors) { - sql = sql.whereIn('crawl_sources.url', opts.filters.authors) - } - if (opts && opts.limit) sql = sql.limit(opts.limit) - if (opts && opts.offset) sql = sql.offset(opts.offset) - - // execute query - var rows = await db.all(sql) - return rows.map(row => ({ - tag: row.tag, - count: +row.count - })) -} - -/** - * @description - * List media tags. 
- * - * @param {Object} [opts] - * @param {Object} [opts.filters] - * @param {string|string[]} [opts.filters.authors] - * @param {string|string[]} [opts.filters.subtypes] - * @param {string} [opts.filters.visibility] - * @param {string} [opts.sortBy] - * @param {number} [opts.offset=0] - * @param {number} [opts.limit] - * @param {boolean} [opts.reverse] - * @returns {Promise>} - */ -exports.listMediaTags = async function (opts) { - // TODO: handle visibility - // TODO: sortBy options - - // validate & parse params - if (opts && 'sortBy' in opts) assert(typeof opts.sortBy === 'string', 'SortBy must be a string') - if (opts && 'offset' in opts) assert(typeof opts.offset === 'number', 'Offset must be a number') - if (opts && 'limit' in opts) assert(typeof opts.limit === 'number', 'Limit must be a number') - if (opts && 'reverse' in opts) assert(typeof opts.reverse === 'boolean', 'Reverse must be a boolean') - if (opts && opts.filters) { - if ('authors' in opts.filters) { - if (Array.isArray(opts.filters.authors)) { - assert(opts.filters.authors.every(v => typeof v === 'string'), 'Authors filter must be a string or array of strings') - } else { - assert(typeof opts.filters.authors === 'string', 'Authors filter must be a string or array of strings') - opts.filters.authors = [opts.filters.authors] - } - opts.filters.authors = await Promise.all(opts.filters.authors.map(datLibrary.getPrimaryUrl)) - } - if ('subtypes' in opts.filters) { - if (Array.isArray(opts.filters.subtypes)) { - assert(opts.filters.subtypes.every(v => typeof v === 'string'), 'Subtypes filter must be a string or array of strings') - } else { - assert(typeof opts.filters.subtypes === 'string', 'Subtypes filter must be a string or array of strings') - opts.filters.subtypes = [opts.filters.subtypes] - } - opts.filters.subtypes = opts.filters.subtypes.map(normalizeSchemaUrl) - } - if ('visibility' in opts.filters) { - assert(typeof opts.filters.visibility === 'string', 'Visibility filter must be a string') - } - } - - // build query - var sql = knex('crawl_tags') - .select('crawl_tags.tag') - .select(knex.raw('count(crawl_tags.id) as count')) - .innerJoin('crawl_media_tags', 'crawl_media_tags.crawlTagId', '=', 'crawl_tags.id') - .innerJoin('crawl_media', 'crawl_media_tags.crawlMediaId', '=', 'crawl_media.id') - .leftJoin('crawl_sources', 'crawl_media.crawlSourceId', '=', 'crawl_sources.id') - .orderBy('crawl_tags.tag', opts.reverse ? 
'DESC' : 'ASC') - .groupBy('crawl_tags.tag') - if (opts && opts.filters && opts.filters.authors) { - sql = sql.whereIn('crawl_sources.url', opts.filters.authors) - } - if (opts && opts.filters && opts.filters.subtypes) { - sql = sql.whereIn('crawl_media.subtype', opts.filters.subtypes) - } - if (opts && opts.limit) sql = sql.limit(opts.limit) - if (opts && opts.offset) sql = sql.offset(opts.offset) - - // execute query - var rows = await db.all(sql) - return rows.map(row => ({ - tag: row.tag, - count: +row.count - })) -} \ No newline at end of file diff --git a/dat/archives.js b/dat/archives.js new file mode 100644 index 00000000..c758748c --- /dev/null +++ b/dat/archives.js @@ -0,0 +1,554 @@ +const emitStream = require('emit-stream') +const EventEmitter = require('events') +const datEncoding = require('dat-encoding') +const parseDatURL = require('parse-dat-url') +const _debounce = require('lodash.debounce') +const pda = require('pauls-dat-api2') +const baseLogger = require('../logger').get() +const logger = baseLogger.child({category: 'dat', subcategory: 'archives'}) + +// dbs +const siteData = require('../dbs/sitedata') +const settingsDb = require('../dbs/settings') +const archivesDb = require('../dbs/archives') +const datDnsDb = require('../dbs/dat-dns') + +// dat modules +const daemon = require('./daemon') +const datAssets = require('./assets') + +// constants +// = + +const { + DAT_HASH_REGEX, + DAT_PRESERVED_FIELDS_ON_FORK +} = require('../lib/const') +const {InvalidURLError, TimeoutError} = require('beaker-error-constants') + +// typedefs +// = + +/** + * @typedef {import('../dbs/archives').LibraryArchiveRecord} LibraryArchiveRecord + * @typedef {import('./daemon').DaemonDatArchive} DaemonDatArchive + */ + +// globals +// = + +var archives = {} // in-memory cache of archive objects. 
key -> archive +var archiveLoadPromises = {} // key -> promise +var archiveSessionCheckouts = {} // key+version -> DaemonDatArchive +var archivesEvents = new EventEmitter() +// var daemonEvents TODO + +// exported API +// = + +exports.on = archivesEvents.on.bind(archivesEvents) +exports.addListener = archivesEvents.addListener.bind(archivesEvents) +exports.removeListener = archivesEvents.removeListener.bind(archivesEvents) + +/** + * @param {Object} opts + * @param {Object} opts.rpcAPI + * @param {Object} opts.datDaemonProcess + * @param {string[]} opts.disallowedSavePaths + * @return {Promise} + */ +exports.setup = async function setup ({rpcAPI, disallowedSavePaths}) { + // connect to the daemon + await daemon.setup() + + datDnsDb.on('updated', ({key, name}) => { + var archive = getArchive(key) + if (archive) { + archive.domain = name + } + }) + + // re-export events + // TODO + // daemonEvents.on('network-changed', evt => archivesEvents.emit('network-changed', evt)) + + // configure the bandwidth throttle + // TODO + // settingsDb.getAll().then(({dat_bandwidth_limit_up, dat_bandwidth_limit_down}) => { + // daemon.setBandwidthThrottle({ + // up: dat_bandwidth_limit_up, + // down: dat_bandwidth_limit_down + // }) + // }) + // settingsDb.on('set:dat_bandwidth_limit_up', up => daemon.setBandwidthThrottle({up})) + // settingsDb.on('set:dat_bandwidth_limit_down', down => daemon.setBandwidthThrottle({down})) + + logger.info('Initialized dat daemon') +} + +/** + * @returns {Promise} + */ +exports.loadSavedArchives = async function () { + // load all saved archives + var archives = await require('../filesystem/dat-library').list({isHosting: true}) + // HACK + // load the archives one at a time and give 5 seconds between each + // why: the purpose of loading saved archives is to seed them + // loading them all at once can bog down the user's device + // if the user tries to access an archive, Beaker will load it immediately + // so spacing out the loads has no visible impact on the user + // (except for reducing the overall load for the user) + // -prf + for (let a of archives) { + loadArchive(a.key) + await new Promise(r => setTimeout(r, 5e3)) // wait 5s + } +} + +/** + * @returns {NodeJS.ReadableStream} + */ +exports.createEventStream = function createEventStream () { + return emitStream.toStream(archivesEvents) +} + +/** + * @param {string} key + * @returns {Promise} + */ +exports.getDebugLog = function getDebugLog (key) { + return '' // TODO needed? daemon.getDebugLog(key) +} + +/** + * @returns {NodeJS.ReadableStream} + */ +exports.createDebugStream = function createDebugStream () { + // TODO needed? + // return daemon.createDebugStream() +} + +// read metadata for the archive, and store it in the meta db +const pullLatestArchiveMeta = exports.pullLatestArchiveMeta = async function pullLatestArchiveMeta (archive, {updateMTime} = {}) { + try { + var key = archive.key.toString('hex') + + // trigger DNS update + confirmDomain(key) + + // read the archive meta and size on disk + var [manifest, oldMeta, size] = await Promise.all([ + archive.pda.readManifest().catch(_ => {}), + archivesDb.getMeta(key), + archive.pda.readSize('/') + ]) + var {title, description, type, author, forkOf} = (manifest || {}) + var isOwner = archive.writable + var mtime = updateMTime ? 
Date.now() : oldMeta.mtime + var details = {title, description, type, mtime, size, author, forkOf, isOwner} + + // check for changes + if (!hasMetaChanged(details, oldMeta)) { + return + } + + // write the record + await archivesDb.setMeta(key, details) + + // emit the updated event + details.url = 'dat://' + key + archivesEvents.emit('updated', {key, details, oldMeta}) + return details + } catch (e) { + console.error('Error pulling meta', e) + } +} + +// archive creation +// = + +/** + * @returns {Promise} + */ +exports.createNewRootArchive = async function () { + var archive = await loadArchive(null, {visibility: 'private'}) + await pullLatestArchiveMeta(archive) + return archive +} + +/** + * @param {Object} [manifest] + * @returns {Promise} + */ +const createNewArchive = exports.createNewArchive = async function (manifest = {}) { + // create the archive + var archive = await loadArchive(null) + + // write the manifest and default datignore + await Promise.all([ + archive.pda.writeManifest(manifest), + archive.pda.writeFile('/.datignore', await settingsDb.get('default_dat_ignore'), 'utf8') + ]) + + // save the metadata + await pullLatestArchiveMeta(archive) + + return archive +} + +/** + * @param {string} srcArchiveUrl + * @param {Object} [manifest] + * @returns {Promise} + */ +exports.forkArchive = async function forkArchive (srcArchiveUrl, manifest = {}) { + srcArchiveUrl = fromKeyToURL(srcArchiveUrl) + + // get the source archive + var srcArchive + var downloadRes = await Promise.race([ + (async function () { + srcArchive = await getOrLoadArchive(srcArchiveUrl) + if (!srcArchive) { + throw new Error('Invalid archive key') + } + return srcArchive.pda.download('/') + })(), + new Promise(r => setTimeout(() => r('timeout'), 60e3)) + ]) + if (downloadRes === 'timeout') { + throw new TimeoutError('Timed out while downloading source archive') + } + + // fetch source archive meta + var srcManifest = await srcArchive.pda.readManifest().catch(_ => {}) + srcManifest = srcManifest || {} + + // override any manifest data + var dstManifest = { + title: (manifest.title) ? manifest.title : srcManifest.title, + description: (manifest.description) ? manifest.description : srcManifest.description, + type: (manifest.type) ? manifest.type : srcManifest.type, + author: manifest.author, + forkOf: srcArchiveUrl + } + DAT_PRESERVED_FIELDS_ON_FORK.forEach(field => { + if (srcManifest[field]) { + dstManifest[field] = srcManifest[field] + } + }) + + // create the new archive + var dstArchive = await createNewArchive(dstManifest) + + // copy files + var ignore = ['/.dat', '/.git', '/dat.json'] + await pda.exportArchiveToArchive({ + srcArchive: srcArchive.session.drive, + dstArchive: dstArchive.session.drive, + skipUndownloadedFiles: true, + ignore + }) + + // write a .datignore if DNE + try { + await dstArchive.pda.stat('/.datignore') + } catch (e) { + await dstArchive.pda.writeFile('/.datignore', await settingsDb.get('default_dat_ignore'), 'utf8') + } + + return dstArchive +} + +// archive management +// = + +const loadArchive = exports.loadArchive = async function loadArchive (key, settingsOverride) { + // validate key + if (key) { + if (!Buffer.isBuffer(key)) { + // existing dat + key = await fromURLToKey(key, true) + if (!DAT_HASH_REGEX.test(key)) { + throw new InvalidURLError() + } + key = datEncoding.toBuf(key) + } + } + + // fallback to the promise, if possible + var keyStr = key ? 
datEncoding.toStr(key) : null + if (keyStr && keyStr in archiveLoadPromises) { + return archiveLoadPromises[keyStr] + } + + // run and cache the promise + var p = loadArchiveInner(key, settingsOverride) + if (key) archiveLoadPromises[keyStr] = p + p.catch(err => { + console.error('Failed to load archive', keyStr, err.toString()) + }) + + // when done, clear the promise + if (key) { + const clear = () => delete archiveLoadPromises[keyStr] + p.then(clear, clear) + } + + return p +} + +// main logic, separated out so we can capture the promise +async function loadArchiveInner (key, settingsOverride) { + // ensure the folders exist + // TODO needed? + // var metaPath = archivesDb.getArchiveMetaPath(key) + // mkdirp.sync(metaPath) + + // create the archive session with the daemon + var archive = await daemon.createDatArchiveSession({key}) + key = archive.key + var keyStr = datEncoding.toStr(archive.key) + + // fetch library settings + var userSettings = require('../filesystem/dat-library').getConfig(keyStr) + if (!userSettings) { + if (require('../filesystem/users').isUser(archive.url)) { + userSettings = {key: keyStr, isSaved: true, isHosting: true, visibility: 'unlisted', savedAt: null, meta: null} + } + } + if (settingsOverride) { + userSettings = Object.assign(userSettings || {}, settingsOverride) + } + + // put the archive on the network + if (!userSettings || userSettings.visibility !== 'private') { + archive.session.publish() + } + + // fetch dns name if known + let dnsRecord = await datDnsDb.getCurrentByKey(datEncoding.toStr(key)) + archive.domain = dnsRecord ? dnsRecord.name : undefined + + // update db + archivesDb.touch(archive.key).catch(err => console.error('Failed to update lastAccessTime for archive', archive.key, err)) + await pullLatestArchiveMeta(archive) + datAssets.update(archive) + + // wire up events + archive.pullLatestArchiveMeta = opts => pullLatestArchiveMeta(archive, opts) + archive.fileActStream = archive.pda.watch('/') + archive.fileActStream.on('data', _debounce(([event, {path}]) => { + if (event !== 'changed') return + archive.pullLatestArchiveMeta({updateMTime: true}) + datAssets.update(archive) + }, 1e3)) + + // now store in main archives listing, as loaded + archives[keyStr] = archive + return archive +} + +const getArchive = exports.getArchive = function getArchive (key) { + key = fromURLToKey(key) + return archives[key] +} + +exports.getArchiveCheckout = async function getArchiveCheckout (archive, version) { + var isHistoric = false + var checkoutFS = archive + if (typeof version !== 'undefined' && version !== null) { + let seq = parseInt(version) + if (Number.isNaN(seq)) { + if (version === 'latest') { + // ignore, we use latest by default + } else { + throw new Error('Invalid version identifier: ' + version) + } + } else { + let checkoutKey = `${archive.key}+${version}` + if (!(checkoutKey in archiveSessionCheckouts)) { + archiveSessionCheckouts[checkoutKey] = await daemon.createDatArchiveSession({ + key: archive.key, + version, + writable: false + }) + } + checkoutFS = archiveSessionCheckouts[checkoutKey] + checkoutFS.domain = archive.domain + isHistoric = true + } + } + return {isHistoric, checkoutFS} +} + +exports.getActiveArchives = function getActiveArchives () { + return archives +} + +const getOrLoadArchive = exports.getOrLoadArchive = async function getOrLoadArchive (key) { + key = await fromURLToKey(key, true) + var archive = getArchive(key) + if (archive) { + return archive + } + return loadArchive(key) +} + +exports.unloadArchive = async function 
unloadArchive (key) { + key = await fromURLToKey(key, true) + var archive = archives[key] + if (!archive) return + if (archive.fileActStream) { + archive.fileActStream.close() + archive.fileActStream = null + } + delete archives[key] + archive.session.unpublish() + archive.session.close() +} + +const isArchiveLoaded = exports.isArchiveLoaded = function isArchiveLoaded (key) { + key = fromURLToKey(key) + return key in archives +} + +// archive fetch/query +// = + +exports.getArchiveInfo = async function getArchiveInfo (key) { + // get the archive + key = await fromURLToKey(key, true) + var archive = await getOrLoadArchive(key) + + // fetch archive data + var userSettings = require('../filesystem/dat-library').getConfig(key) + var [meta, manifest, archiveInfo] = await Promise.all([ + archivesDb.getMeta(key), + archive.pda.readManifest().catch(_ => {}), + archive.getInfo() + ]) + manifest = manifest || {} + meta.key = key + meta.url = archive.url + meta.domain = archive.domain + meta.links = manifest.links || {} + meta.manifest = manifest + meta.version = archiveInfo.version + meta.userSettings = { + isSaved: userSettings ? true : false, + isHosting: userSettings ? userSettings.isHosting : false, + visibility: userSettings ? userSettings.visibility : undefined, + savedAt: userSettings ? userSettings.savedAt : null + } + meta.peers = archiveInfo.peers + meta.networkStats = archiveInfo.networkStats + + return meta +} + +exports.getArchiveNetworkStats = async function getArchiveNetworkStats (key) { + key = await fromURLToKey(key, true) + return {} // TODO daemon.getArchiveNetworkStats(key) +} + +exports.clearFileCache = async function clearFileCache (key) { + return {} // TODO daemon.clearFileCache(key, userSettings) +} + +/** + * @desc + * Get the primary URL for a given dat URL + * + * @param {string} url + * @returns {Promise} + */ +const getPrimaryUrl = exports.getPrimaryUrl = async function (url) { + var key = await fromURLToKey(url, true) + var datDnsRecord = await datDnsDb.getCurrentByKey(key) + if (!datDnsRecord) return `dat://${key}` + return `dat://${datDnsRecord.name}` +} + +/** + * @desc + * Check that the archive's dat.json `domain` matches the current DNS + * If yes, write the confirmed entry to the dat_dns table + * + * @param {string} key + * @returns {Promise} + */ +const confirmDomain = exports.confirmDomain = async function (key) { + // fetch the current domain from the manifest + try { + var archive = await getOrLoadArchive(key) + var datJson = await archive.pda.readManifest() + } catch (e) { + return false + } + if (!datJson.domain) { + await datDnsDb.unset(key) + return false + } + + // confirm match with current DNS + var dnsKey = await require('./dns').resolveName(datJson.domain) + if (key !== dnsKey) { + await datDnsDb.unset(key) + return false + } + + // update mapping + await datDnsDb.update({name: datJson.domain, key}) + return true +} + +// helpers +// = + +const fromURLToKey = exports.fromURLToKey = function fromURLToKey (url, lookupDns = false) { + if (Buffer.isBuffer(url)) { + return url + } + if (DAT_HASH_REGEX.test(url)) { + // simple case: given the key + return url + } + + var urlp = parseDatURL(url) + + // validate + if (urlp.protocol !== 'dat:') { + throw new InvalidURLError('URL must be a dat: scheme') + } + if (!DAT_HASH_REGEX.test(urlp.host)) { + if (!lookupDns) { + throw new InvalidURLError('Hostname is not a valid hash') + } + return require('./dns').resolveName(urlp.host) + } + + return urlp.host +} + +const fromKeyToURL = exports.fromKeyToURL = 
function fromKeyToURL (key) { + if (typeof key !== 'string') { + key = datEncoding.toStr(key) + } + if (!key.startsWith('dat://')) { + return `dat://${key}/` + } + return key +} + +function hasMetaChanged (m1, m2) { + for (let k of ['title', 'description', 'type', 'size', 'author', 'forkOf']) { + if (!m1[k]) m1[k] = undefined + if (!m2[k]) m2[k] = undefined + if (m1[k] !== m2[k]) { + return true + } + } + return false +} \ No newline at end of file diff --git a/dat/assets.js b/dat/assets.js index 13a12ad9..a663a3f1 100644 --- a/dat/assets.js +++ b/dat/assets.js @@ -13,7 +13,7 @@ const IDEAL_FAVICON_SIZE = 64 // = /** - * @typedef {import('./library').InternalDatArchive} InternalDatArchive + * @typedef {import('./daemon').DaemonDatArchive} DaemonDatArchive */ // globals @@ -32,7 +32,7 @@ exports.removeListener = events.removeListener.bind(events) * @description * Crawl the given site for assets. * - * @param {InternalDatArchive} archive - site to crawl. + * @param {DaemonDatArchive} archive - site to crawl. * @param {string[]?} filenames - which files to check. * @returns {Promise} */ @@ -73,7 +73,7 @@ function extractAssetType (pathname) { /** * Reads the asset file as a dataurl * - Converts any .ico to .png - * @param {InternalDatArchive} archive + * @param {DaemonDatArchive} archive * @param {string} pathname * @returns string The asset as a data URL */ diff --git a/dat/daemon.js b/dat/daemon.js new file mode 100644 index 00000000..b28d45b9 --- /dev/null +++ b/dat/daemon.js @@ -0,0 +1,147 @@ +const HyperdriveDaemon = require('hyperdrive-daemon') +const { createMetadata } = require('hyperdrive-daemon/lib/metadata') +const constants = require('hyperdrive-daemon-client/lib/constants') +const { HyperdriveClient } = require('hyperdrive-daemon-client') +const datEncoding = require('dat-encoding') +const pda = require('pauls-dat-api2') + +const SETUP_RETRIES = 10 + +// typedefs +// = + +/** +* @typedef {Object} DaemonDatArchive +* @prop {number} sessionId +* @prop {Buffer} key +* @prop {string} url +* @prop {string} domain +* @prop {boolean} writable +* @prop {Object} session +* @prop {Object} session.drive +* @prop {function(): Promise} session.close +* @prop {function(): Promise} session.publish +* @prop {function(): Promise} session.unpublish +* @prop {function(): Promise} getInfo +* @prop {DaemonDatArchivePDA} pda +* +* @typedef {Object} DaemonDatArchivePDA +* @prop {function(string): Promise} stat +* @prop {function(string, Object=): Promise} readFile +* @prop {function(string, Object=): Promise>} readdir +* @prop {function(string): Promise} readSize +* @prop {function(string, any, Object=): Promise} writeFile +* @prop {function(string): Promise} mkdir +* @prop {function(string, string): Promise} copy +* @prop {function(string, string): Promise} rename +* @prop {function(string): Promise} unlink +* @prop {function(string, Object=): Promise} rmdir +* @prop {function(string, string|Buffer): Promise} mount +* @prop {function(string): Promise} unmount +* @prop {function(string=): Promise} download +* @prop {function(string=): NodeJS.ReadableStream} watch +* @prop {function(): NodeJS.ReadableStream} createNetworkActivityStream +* @prop {function(): Promise} readManifest +* @prop {function(Object): Promise} writeManifest +* @prop {function(Object): Promise} updateManifest +*/ + +// globals +// = + +var client // client object created by hyperdrive-daemon-client + +// exported apis +// = + +exports.setup = async function () { + // instantiate the daemon + // TODO the daemon should be managed in 
an external process + await createMetadata(`localhost:${constants.port}`) + var daemon = new HyperdriveDaemon() + await daemon.start() + process.on('exit', () => daemon.stop()) + + for (let i = 0; i < SETUP_RETRIES; i++) { + try { + client = new HyperdriveClient() + await client.ready() + } catch (e) { + console.log('Failed to connect to daemon, retrying', e) + } + } + + // attach to the process for debugging + process.hyperdriveDaemon = daemon + process.hyperdriveDaemonClient = client +} + +/** + * Creates a dat-archive interface to the daemon for the given key + * + * @param {Object} opts + * @param {Buffer} opts.key + * @param {number} [opts.version] + * @param {Buffer} [opts.hash] + * @param {boolean} [opts.writable] + * @returns {Promise} + */ +exports.createDatArchiveSession = async function (opts) { + const drive = await client.drive.get(opts) + const key = datEncoding.toStr(drive.key) + var datArchive = { + key: datEncoding.toBuf(key), + url: `dat://${key}`, + writable: drive.writable, + domain: undefined, + + session: { + drive, + async close () { + return drive.close() + }, + async publish () { + return drive.publish() + }, + async unpublish () { + return drive.unpublish() + } + }, + + async getInfo () { + var [version, stats] = await Promise.all([ + drive.version(), + drive.stats() + ]) + return { + version, + peers: stats[0].metadata.peers, + networkStats: { + uploadTotal: stats[0].metadata.uploadedBytes + stats[0].content.uploadedBytes, + downloadTotal: stats[0].metadata.downloadedBytes + stats[0].content.downloadedBytes, + } + } + }, + + pda: createDatArchiveSessionPDA(drive) + } + return /** @type DaemonDatArchive */(datArchive) +} + +// internal methods +// = + +/** + * Provides a pauls-dat-api2 object for the given archive + * @param {Object} datArchive + * @returns {DaemonDatArchivePDA} + */ +function createDatArchiveSessionPDA (datArchive) { + var obj = {} + for (let k in pda) { + if (typeof pda[k] === 'function') { + obj[k] = pda[k].bind(pda, datArchive) + } + } + return obj +} \ No newline at end of file diff --git a/dat/daemon/extensions.js b/dat/daemon/extensions.js deleted file mode 100644 index 32e77ef2..00000000 --- a/dat/daemon/extensions.js +++ /dev/null @@ -1,189 +0,0 @@ -const EventEmitter = require('events') -const emitStream = require('emit-stream') -const {DatSessionDataExtMsg} = require('@beaker/dat-session-data-ext-msg') -const {DatEphemeralExtMsg} = require('@beaker/dat-ephemeral-ext-msg') - -// globals -// = - -var datSessionDataExtMsg = new DatSessionDataExtMsg() -var datEphemeralExtMsg = new DatEphemeralExtMsg() - -// exported api -// = - -function setup () { - datEphemeralExtMsg.on('message', onEphemeralMsg) - datSessionDataExtMsg.on('session-data', onSessionDataMsg) -} -exports.setup = setup - -// call this on every archive created in the library -function attach (archive) { - datEphemeralExtMsg.watchDat(archive) - datSessionDataExtMsg.watchDat(archive) - archive._datPeersEvents = new EventEmitter() - archive._datPeersOnPeerAdd = (peer) => onPeerAdd(archive, peer) - archive._datPeersOnPeerRemove = (peer) => onPeerRemove(archive, peer) - archive.metadata.on('peer-add', archive._datPeersOnPeerAdd) - archive.metadata.on('peer-remove', archive._datPeersOnPeerRemove) -} -exports.attach = attach - -// call this on every archive destroyed in the library -function detach (archive) { - datEphemeralExtMsg.unwatchDat(archive) - datSessionDataExtMsg.unwatchDat(archive) - delete archive._datPeersEvents - archive.metadata.removeListener('peer-add', 
archive._datPeersOnPeerAdd) - archive.metadata.removeListener('peer-remove', archive._datPeersOnPeerRemove) -} -exports.detach = detach - -// impl for datPeers.list() -function listPeers (archive) { - return archive.metadata.peers.map(internalPeerObj => createWebAPIPeerObj(archive, internalPeerObj)) -} -exports.listPeers = listPeers - -// impl for datPeers.getPeer(peerId) -function getPeer (archive, peerId) { - var internalPeerObj = archive.metadata.peers.find(internalPeerObj => getPeerId(internalPeerObj) === peerId) - return createWebAPIPeerObj(archive, internalPeerObj) -} -exports.getPeer = getPeer - -// impl for datPeers.broadcast(msg) -function broadcastEphemeralMessage (archive, payload) { - datEphemeralExtMsg.broadcast(archive, encodeEphemeralMsg(payload)) -} -exports.broadcastEphemeralMessage =broadcastEphemeralMessage - -// impl for datPeers.send(peerId, msg) -function sendEphemeralMessage (archive, peerId, payload) { - datEphemeralExtMsg.send(archive, peerId, encodeEphemeralMsg(payload)) -} -exports.sendEphemeralMessage = sendEphemeralMessage - -// impl for datPeers.getSessionData() -function getSessionData (archive) { - return decodeSessionData(datSessionDataExtMsg.getLocalSessionData(archive)) -} -exports.getSessionData = getSessionData - -// impl for datPeers.getSessionData(data) -function setSessionData (archive, sessionData) { - datSessionDataExtMsg.setLocalSessionData(archive, encodeSessionData(sessionData)) -} -exports.setSessionData = setSessionData - -function createDatPeersStream (archive) { - return emitStream(archive._datPeersEvents) -} -exports.createDatPeersStream = createDatPeersStream - -// events -// = - -function onPeerAdd (archive, internalPeerObj) { - if (getPeerId(internalPeerObj)) onHandshook() - else internalPeerObj.stream.stream.on('handshake', onHandshook) - - function onHandshook () { - var peerId = getPeerId(internalPeerObj) - - // send session data - if (datSessionDataExtMsg.getLocalSessionData(archive)) { - datSessionDataExtMsg.sendLocalSessionData(archive, peerId) - } - - // emit event - archive._datPeersEvents.emit('connect', { - peerId, - sessionData: getPeerSessionData(archive, peerId) - }) - } -} - -function onPeerRemove (archive, internalPeerObj) { - var peerId = getPeerId(internalPeerObj) - if (peerId) { - archive._datPeersEvents.emit('disconnect', { - peerId, - sessionData: getPeerSessionData(archive, peerId) - }) - } -} - -function onEphemeralMsg (archive, internalPeerObj, msg) { - var peerId = getPeerId(internalPeerObj) - archive._datPeersEvents.emit('message', { - peerId, - sessionData: getPeerSessionData(archive, peerId), - message: decodeEphemeralMsg(msg) - }) -} - -function onSessionDataMsg (archive, internalPeerObj, sessionData) { - archive._datPeersEvents.emit('session-data', { - peerId: getPeerId(internalPeerObj), - sessionData: decodeSessionData(sessionData) - }) -} - -// internal methods -// = - -function getPeerId (internalPeerObj) { - var feedStream = internalPeerObj.stream - var protocolStream = feedStream.stream - return protocolStream.remoteId ? 
protocolStream.remoteId.toString('hex') : null -} - -function getPeerSessionData (archive, peerId) { - return decodeSessionData(datSessionDataExtMsg.getSessionData(archive, peerId)) -} - -function createWebAPIPeerObj (archive, internalPeerObj) { - var id = getPeerId(internalPeerObj) - var sessionData = getPeerSessionData(archive, id) - return {id, sessionData} -} - -function encodeEphemeralMsg (payload) { - var contentType - if (Buffer.isBuffer(payload)) { - contentType = 'application/octet-stream' - } else { - contentType = 'application/json' - payload = Buffer.from(JSON.stringify(payload), 'utf8') - } - return {contentType, payload} -} - -function decodeEphemeralMsg (msg) { - var payload - if (msg.contentType === 'application/json') { - try { - payload = JSON.parse(msg.payload.toString('utf8')) - } catch (e) { - console.error('Failed to parse ephemeral message', e, msg) - payload = null - } - } - return payload -} - -function encodeSessionData (obj) { - return Buffer.from(JSON.stringify(obj), 'utf8') -} - -function decodeSessionData (sessionData) { - if (!sessionData || sessionData.length === 0) return null - try { - return JSON.parse(sessionData.toString('utf8')) - } catch (e) { - console.error('Failed to parse local session data', e, sessionData) - return null - } -} \ No newline at end of file diff --git a/dat/daemon/folder-sync.js b/dat/daemon/folder-sync.js deleted file mode 100644 index b79e5eee..00000000 --- a/dat/daemon/folder-sync.js +++ /dev/null @@ -1,463 +0,0 @@ -const bytes = require('bytes') -const dft = require('diff-file-tree') -const diff = require('diff') -const anymatch = require('anymatch') -const fs = require('fs') -const path = require('path') -const EventEmitter = require('events') -const pda = require('pauls-dat-api') -const mkdirp = require('mkdirp') -const {toAnymatchRules} = require('@beaker/datignore') -const logger = require('./logger').child({category: 'dat', subcategory: 'folder-sync'}) -const {isFileNameBinary, isFileContentBinary} = require('../../lib/mime') -const lock = require('../../lib/lock') -const scopedFSes = require('../../lib/scoped-fses') -const { - NotFoundError, - NotAFolderError, - ProtectedFileNotWritableError, - ArchiveNotWritableError, - InvalidEncodingError, - SourceTooLargeError -} = require('beaker-error-constants') - -const MAX_DIFF_SIZE = bytes('100kb') - -// globals -// = - -var disallowedSavePaths = [] - -// exported api -// = - -const events = exports.events = new EventEmitter() - -exports.setup = function (opts) { - disallowedSavePaths = opts.disallowedSavePaths -} - -// sync dat to the folder -// - opts -// - shallow: bool, dont descend into changed folders (default true) -// - compareContent: bool, compare the actual content (default true) -// - paths: Array, a whitelist of files to compare -// - localSyncPath: string, override the archive localSyncPath -// - addOnly: bool, dont modify or remove any files (default false) -const syncArchiveToFolder = exports.syncArchiveToFolder = function (archive, opts = {}) { - opts = opts || {} - return sync(archive, false, opts) -} - -// sync folder to the dat -// - opts -// - shallow: bool, dont descend into changed folders (default true) -// - compareContent: bool, compare the actual content (default true) -// - paths: Array, a whitelist of files to compare -// - localSyncPath: string, override the archive localSyncPath -// - addOnly: bool, dont modify or remove any files (default false) -const syncFolderToArchive = exports.syncFolderToArchive = function (archive, opts = {}) { - opts = 
opts || {} - if (!archive.writable) throw new ArchiveNotWritableError() - return sync(archive, true, opts) -} - -// helper to wait for sync on an archive to be finished -const ensureSyncFinished = exports.ensureSyncFinished = async function (archive) { - var isFinished - var release = await getArchiveSyncLock(archive) - try { isFinished = (archive._activeSyncs == 0) } - finally { release() } - if (!isFinished) { - return ensureSyncFinished(archive) // check again - } -} - -// queue a sync event from folder->archive or archive->folder -// - debounces the sync event with a 500ms timeout -// - call with toFolder: true to sync from archive->folder -// - call with toArchive: true to sync from folder->archive -// - if both toFolder && toArchive are queued, toArchive wins (local folder wins) -// - this *will* result in lost changes in the archive if simultaneous changes happen in the local folder, -// but it creates very deterministic results -const queueSyncEvent = exports.queueSyncEvent = function (archive, {toFolder, toArchive}) { - if (!archive.syncEventQueue) { - archive.syncEventQueue = newQueueObj() - } - - // ignore if currently syncing - if (archive.syncEventQueue.isSyncing) return logger.silly('Already syncing, ignored') - - // debounce the handler - if (archive.syncEventQueue.timeout) { - clearTimeout(archive.syncEventQueue.timeout) - } - - // queue - if (toFolder) archive.syncEventQueue.toFolder = true - if (toArchive) archive.syncEventQueue.toArchive = true - archive.syncEventQueue.timeout = setTimeout(async () => { - const localSyncPath = archive.localSyncSettings.path - const {toArchive, toFolder} = archive.syncEventQueue - - // lock - archive.syncEventQueue.isSyncing = true - logger.silly('Ok timed out, beginning sync', {details: {toArchive, toFolder}}) - - try { - let st = await stat(fs, localSyncPath) - if (!st) { - // folder has been removed - archive.stopWatchingLocalFolder() - archive.stopWatchingLocalFolder = null - logger.warn('Local sync folder not found, aborting watch', {details: {path: localSyncPath}}) - return - } - // sync with priority given to the local folder - if (toArchive) await syncFolderToArchive(archive, {localSyncPath, shallow: false}) - else if (toFolder) await syncArchiveToFolder(archive, {localSyncPath, shallow: false}) - } catch (e) { - logger.error('Error syncing folder', {details: {path: localSyncPath, error: e.toString()}}) - if (e.name === 'CycleError') { - events.emit('error', archive.key, e) - } - } finally { - // reset the queue - archive.syncEventQueue = newQueueObj() - } - }, 500) -} -function newQueueObj () { - return {timeout: null, toFolder: false, toArchive: false, isSyncing: false} -} - -// attach/detach a watcher on the local folder and sync it to the dat -exports.configureFolderToArchiveWatcher = async function (archive) { - // HACKish - // it's possible that configureFolderToArchiveWatcher() could be called multiple times in sequence - // (for instance because of multiple settings changes) - // this is problematic because the method is async, and a previous call may still be in progress - // shouldAbort() tracks whether such an event has occurred and lets you drop out - // put this after every await: - // - // if (shouldAbort()) return - // - // -prf - var callCount = archive.folderSyncConfig_CallCount = (archive.folderSyncConfig_CallCount || 0) + 1 - const shouldAbort = () => callCount !== archive.folderSyncConfig_CallCount - - // teardown the existing watch (his watch has ended) - // = - - if (archive.stopWatchingLocalFolder) { - // 
stop watching - archive.stopWatchingLocalFolder() - archive.stopWatchingLocalFolder = null - if (archive.syncEventQueue && archive.syncEventQueue.timeout) { - clearTimeout(archive.syncEventQueue.timeout) - archive.syncEventQueue = null - } - } - if (archive.stopWatchingDatIgnore) { - archive.stopWatchingDatIgnore() - archive.stopWatchingDatIgnore = null - } - - // start a new watch - // = - - if (archive.localSyncSettings) { - logger.silly('Configuring archive sync', {details: {key: archive.key.toString('hex'), settings: archive.localSyncSettings}}) - - // create diff cache - archive._compareContentCache = {} - - // create internal folder if needed - if (archive.localSyncSettings.isUsingInternal) { - mkdirp.sync(archive.localSyncSettings.path) - } - - // make sure the folder exists - let st = await stat(fs, archive.localSyncSettings.path) - if (shouldAbort()) return - if (!st) { - logger.warn('Local sync folder not found, aborting watch', {details: {path: archive.localSyncSettings.path}}) - } - var scopedFS = scopedFSes.get(archive.localSyncSettings.path) - - // track datignore rules - readDatIgnore(scopedFS).then(rules => { archive.datIgnoreRules = rules }) - archive.stopWatchingDatIgnore = scopedFS.watch('/.datignore', async () => { - archive.datIgnoreRules = await readDatIgnore(scopedFS) - }) - - if (!archive.localSyncSettings.autoPublish) { - // no need to setup watcher - // just do an add-only sync from archive->folder - await sync(archive, false, {shallow: false, addOnly: true}) - if (shouldAbort()) return - } else { - // sync up - try { - await mergeArchiveAndFolder(archive, archive.localSyncSettings.path) - } catch (err) { - logger.error('Failed to merge local sync folder', {details: {err}}) - } - if (shouldAbort()) return - - // start watching - archive.stopWatchingLocalFolder = scopedFS.watch('/', path => { - // TODO - // it would be possible to make this more efficient by ignoring changes that match .datignore - // but you need to make sure you have the latest .datignore and reading that on every change-event isnt efficient - // so you either need to: - // A. queue up all the changed paths, then read the datignore inside the timeout and filter, if filteredList.length === 0 then abort - // B. 
maintain an in-memory copy of the datignore and keep it up-to-date, and then check at time of the event - // -prf - - logger.silly('Change detected', {details: {path}}) - queueSyncEvent(archive, {toArchive: true}) - }) - } - } else { - // clear diff cache - archive._compareContentCache = {} - } -} - -// list the files that differ -// - opts -// - shallow: bool, dont descend into changed folders (default true) -// - compareContent: bool, compare the actual content (default true) -// - paths: Array, a whitelist of files to compare -// - localSyncPath: string, override the archive localSyncPath -exports.diffListing = async function (archive, opts = {}) { - opts = opts || {} - var localSyncPath = opts.localSyncPath || (archive.localSyncSettings && archive.localSyncSettings.path) - if (!localSyncPath) return logger.warn('Sanity check failed - diffListing() aborting, no localSyncPath') - var scopedFS = scopedFSes.get(localSyncPath) - opts = massageDiffOpts(opts) - - // build ignore rules - if (opts.paths) { - opts.filter = makeDiffFilterByPaths(opts.paths) - } else { - const ignoreRules = await readDatIgnore(scopedFS) - opts.filter = (filepath) => anymatch(ignoreRules, filepath) - } - - // run diff - opts.compareContentCache = archive._compareContentCache - return dft.diff({fs: scopedFS}, {fs: archive}, opts) -} - -// diff an individual file -// - filepath: string, the path of the file in the archive/folder -exports.diffFile = async function (archive, filepath) { - if (!archive.localSyncSettings.path) return logger.warn('Sanity check failed - diffFile() aborting, no localSyncPath') - var scopedFS = scopedFSes.get(archive.localSyncSettings.path) - filepath = path.normalize(filepath) - - // check the filename to see if it's binary - var isBinary = isFileNameBinary(filepath) - if (isBinary === true) { - throw new InvalidEncodingError('Cannot diff a binary file') - } - - // make sure we can handle the buffers involved - let st - st = await stat(scopedFS, filepath) - if (isBinary !== false && st && st.isFile() && await isFileContentBinary(scopedFS, filepath)) { - throw new InvalidEncodingError('Cannot diff a binary file') - } - if (st && st.isFile() && st.size > MAX_DIFF_SIZE) { - throw new SourceTooLargeError() - } - st = await stat(archive, filepath) - if (isBinary !== false && st && st.isFile() && await isFileContentBinary(archive, filepath)) { - throw new InvalidEncodingError('Cannot diff a binary file') - } - if (st && st.isFile() && st.size > MAX_DIFF_SIZE) { - throw new SourceTooLargeError() - } - - // read the file in both sources - const [newFile, oldFile] = await Promise.all([readFile(scopedFS, filepath), readFile(archive, filepath)]) - - // return the diff - return diff.diffLines(oldFile, newFile) -} - -// validate a path to be used for sync -exports.assertSafePath = async function (p) { - // check whether this is an OS path - for (let disallowedSavePath of disallowedSavePaths) { - if (path.normalize(p) === path.normalize(disallowedSavePath)) { - throw new ProtectedFileNotWritableError(`This is a protected folder. 
Please pick another folder or subfolder.`) - } - } - - // stat the folder - const stat = await new Promise(resolve => { - fs.stat(p, (_, st) => resolve(st)) - }) - - if (!stat) { - throw new NotFoundError() - } - - if (!stat.isDirectory()) { - throw new NotAFolderError('Invalid target folder: not a folder') - } -} - -// read a datignore from a fs space and turn it into anymatch rules -const readDatIgnore = exports.readDatIgnore = async function (fs) { - var rulesRaw = await readFile(fs, '.datignore') - return toAnymatchRules(rulesRaw) -} - -// filter function used by scoped-fs to hide files in the datignore -exports.applyDatIgnoreFilter = function (archive, filepath) { - const datIgnoreRules = archive.datIgnoreRules || toAnymatchRules('') - var filepaths = explodeFilePaths(filepath) // we need to check parent paths in addition to the target path - var res = filepaths.filter(p => anymatch(datIgnoreRules, p)).length === 0 - return res -} - -// merge the dat.json in the folder and then merge files, with preference to folder files -const mergeArchiveAndFolder = exports.mergeArchiveAndFolder = async function (archive, localSyncPath) { - logger.silly('Merging archive and folder', {details: {path: localSyncPath, key: archive.key.toString('hex')}}) - const readManifest = async (fs) => { - try { return await pda.readManifest(fs) } catch (e) { return {} } - } - var localFS = scopedFSes.get(localSyncPath) - var localManifest = await readManifest(localFS) - var archiveManifest = await readManifest(archive) - var mergedManifest = Object.assign(archiveManifest || {}, localManifest || {}) - await pda.writeManifest(localFS, mergedManifest) - await sync(archive, false, {localSyncPath, shallow: false, addOnly: true}) // archive -> folder (add-only) - await sync(archive, true, {localSyncPath, shallow: false}) // folder -> archive - events.emit('merge:' + archive.key.toString('hex'), archive.key) - logger.silly('Done merging archive and folder', {details: {path: localSyncPath, key: archive.key.toString('hex')}}) -} - -// internal methods -// = - -// sync the dat & folder content -// - toArchive: true to sync folder to archive, false to sync archive to folder -// - opts -// - shallow: bool, dont descend into changed folders (default true) -// - compareContent: bool, compare the actual content (default true) -// - paths: Array, a whitelist of files to compare -// - localSyncPath: string, override the archive localSyncPath -// - addOnly: bool, dont modify or remove any files (default false) -async function sync (archive, toArchive, opts = {}) { - opts = opts || {} - var localSyncPath = opts.localSyncPath || (archive.localSyncSettings && archive.localSyncSettings.path) - if (!localSyncPath) return logger.warn('Sanity check failed - sync() aborting, no localSyncPath') - - archive._activeSyncs = (archive._activeSyncs || 0) + 1 - var release = await getArchiveSyncLock(archive) - try { - var scopedFS = scopedFSes.get(localSyncPath) - opts = massageDiffOpts(opts) - - // build ignore rules - if (opts.paths) { - opts.filter = makeDiffFilterByPaths(opts.paths) - } else { - let ignoreRules = await readDatIgnore(scopedFS) - opts.filter = (filepath) => anymatch(ignoreRules, filepath) - } - - // choose direction - var left = toArchive ? {fs: scopedFS} : {fs: archive} - var right = toArchive ? 
{fs: archive} : {fs: scopedFS} - - // run diff - opts.compareContentCache = archive._compareContentCache - var diff = await dft.diff(left, right, opts) - if (opts.addOnly) { - diff = diff.filter(d => d.change === 'add') - } - logger.silly(`Syncing to ${toArchive ? 'archive' : 'folder'}`, {details: {key: archive.key.toString('hex'), path: localSyncPath}}) - - // sync data - await dft.applyRight(left, right, diff) - events.emit('sync', archive.key, toArchive ? 'archive' : 'folder') - events.emit('sync:' + archive.key.toString('hex'), archive.key, toArchive ? 'archive' : 'folder') - - // decrement active syncs - archive._activeSyncs-- - } catch (err) { - logger.error('Failed to sync archive to local path', {details: {key: archive.key.toString('hex'), path: localSyncPath, err: err.toString()}}) - } finally { - release() - } -} - -function getArchiveSyncLock (archive) { - return lock('sync:' + archive.key.toString('hex')) -} - -function makeDiffFilterByPaths (targetPaths) { - targetPaths = targetPaths.map(path.normalize) - return (filepath) => { - for (let i = 0; i < targetPaths.length; i++) { - let targetPath = targetPaths[i] - - if (targetPath.endsWith(path.sep)) { - // a directory - if (filepath === targetPath.slice(0, -1)) return false // the directory itself - if (filepath.startsWith(targetPath)) return false // a file within the directory - } else { - // a file - if (filepath === targetPath) return false - } - if (targetPath.startsWith(filepath) && targetPath.charAt(filepath.length) === path.sep) { - return false // a parent folder - } - } - return true - } -} - -function massageDiffOpts (opts) { - return { - compareContent: typeof opts.compareContent === 'boolean' ? opts.compareContent : true, - shallow: typeof opts.shallow === 'boolean' ? opts.shallow : true, - paths: Array.isArray(opts.paths) ? opts.paths.filter(v => typeof v === 'string') : false, - addOnly: typeof opts.addOnly === 'boolean' ? 
opts.addOnly : false - } -} - -// helper to read a file via promise and return a null on fail -async function stat (fs, filepath) { - return new Promise(resolve => { - fs.stat(filepath, (_, data) => { - resolve(data || null) - }) - }) -} - -// helper to read a file via promise and return an empty string on fail -async function readFile (fs, filepath) { - return new Promise(resolve => { - fs.readFile(filepath, {encoding: 'utf8'}, (_, data) => { - resolve(data || '') - }) - }) -} - -// helper to go from '/foo/bar/baz' to ['/', '/foo', '/foo/bar', '/foo/bar/baz'] -function explodeFilePaths (str) { - str = str.replace(/^\/|\/$/g, '') // strip leading and trailing slashes - var paths = str.split('/') - let lastPath = '' - for (let i = 0; i < paths.length; i++) { - lastPath = paths[i] = `${lastPath}/${paths[i]}` - } - return paths -} diff --git a/dat/daemon/index.js b/dat/daemon/index.js deleted file mode 100644 index c7ef8f9f..00000000 --- a/dat/daemon/index.js +++ /dev/null @@ -1,691 +0,0 @@ -const crypto = require('crypto') -const EventEmitter = require('events') -const emitStream = require('emit-stream') -const CircularAppendFile = require('circular-append-file') -const through = require('through2') -const split = require('split2') -const concat = require('concat-stream') -const throttle = require('lodash.throttle') -const isEqual = require('lodash.isequal') -const pump = require('pump') -const jetpack = require('fs-jetpack') -const {join} = require('path') - -// dat modules -const hyperdrive = require('hyperdrive') -const hypercoreProtocol = require('hypercore-protocol') -const pda = require('pauls-dat-api') -const datEncoding = require('dat-encoding') - -// network modules -const swarmDefaults = require('datland-swarm-defaults') -const discoverySwarm = require('discovery-swarm') -const networkSpeed = require('hyperdrive-network-speed') -const {ThrottleGroup} = require('stream-throttle') - -const baseLogger = require('./logger') -const logger = baseLogger.child({category: 'dat', subcategory: 'daemon'}) -const datStorage = require('./storage') -const folderSync = require('./folder-sync') -const {addArchiveSwarmLogging} = require('./logging-utils') -const datExtensions = require('./extensions') -const scopedFSes = require('../../lib/scoped-fses') -const {DAT_SWARM_PORT} = require('../../lib/const') -const RPC_MANIFEST = require('./manifest') - -// globals -// = - -var datPath -var networkId = crypto.randomBytes(32) -var archives = {} // in-memory cache of archive objects. 
key -> archive -var archivesByDKey = {} // same, but discoveryKey -> archive -var daemonEvents = new EventEmitter() -var debugEvents = new EventEmitter() -var debugLogFile -var archiveSwarm - -var upThrottleGroup -var downThrottleGroup - -// exported api -// = - -exports.setup = async function ({rpcAPI, logfilePath}) { - // export API - rpcAPI.exportAPI('dat-daemon', RPC_MANIFEST, RPC_API) - - // setup storage - await datStorage.setup() - debugLogFile = CircularAppendFile(logfilePath, {maxSize: 1024 /* 1kb */ * 1024 /* 1mb */ * 50 /* 50mb */ }) - - // setup extension messages - datExtensions.setup() - - // re-export events - folderSync.events.on('sync', (key, direction) => { - daemonEvents.emit('folder-synced', { - details: { - url: `dat://${datEncoding.toStr(key)}`, - direction - } - }) - }) - folderSync.events.on('error', (key, err) => { - daemonEvents.emit('folder-sync-error', { - details: { - url: `dat://${datEncoding.toStr(key)}`, - name: err.name, - message: err.message - } - }) - }) - - // setup the archive swarm - archiveSwarm = discoverySwarm(swarmDefaults({ - id: networkId, - hash: false, - utp: true, - tcp: true, - dht: false, - connect: connectReplicationStream, - stream: createReplicationStream - })) - addArchiveSwarmLogging({archivesByDKey, log, archiveSwarm}) - archiveSwarm.once('error', () => archiveSwarm.listen(0)) - archiveSwarm.listen(DAT_SWARM_PORT) - archiveSwarm.on('error', error => log(null, {event: 'swarm-error', message: error.toString()}, 'warn')) - - logger.info('Initialized dat daemon') -} - -// rpc api -// = - -const RPC_API = { - // setup & config - // = - - /** - * @method - * @param {*} opts - */ - async setup (opts) { - datPath = opts.datPath - folderSync.setup(opts) - }, - - // up/down are in MB/s - async setBandwidthThrottle ({up, down}) { - logger.info('Setting bandwidth throttle', {details: {up, down}}) - if (typeof up !== 'undefined') { - upThrottleGroup = up ? new ThrottleGroup({rate: up * 1e6}) : null - } - if (typeof down !== 'undefined') { - downThrottleGroup = down ? 
new ThrottleGroup({rate: down * 1e6}) : null - } - }, - - // event streams & debug - // = - - createLogStream () { - return emitStream(baseLogger.events) - }, - - createEventStream () { - return emitStream(daemonEvents) - }, - - createDebugStream () { - return emitStream(debugEvents) - }, - - async getDebugLog (key) { - return new Promise((resolve, reject) => { - let rs = debugLogFile.createReadStream() - rs - .pipe(split()) - .pipe(through({encoding: 'utf8', decodeStrings: false}, (data, _, cb) => { - if (data && (!key || data.startsWith(key))) { - return cb(null, data.slice(64) + '\n') - } - cb() - })) - .pipe(concat({encoding: 'string'}, resolve)) - rs.on('error', reject) - }) - }, - - // archive management - // = - - async configureArchive (key, userSettings) { - var archive = getArchive(key) - if (archive) { - configureNetwork(archive, userSettings) - configureAutoDownload(archive, userSettings) - configureLocalSync(archive, userSettings) - } - }, - - async getArchiveInfo (key) { - var archive = getArchive(key) - if (!archive) return {} - return { - version: archive.version, - size: archive.size, - peers: archive.metadata.peers.length, - peerInfo: getArchivePeerInfos(archive), - peerHistory: archive.peerHistory, - networkStats: archive.networkStats - } - }, - - async getArchiveNetworkStats (key) { - var archive = getArchive(key) - if (!archive) return {} - return archive.networkStats - }, - - updateSizeTracking, - - async loadArchive (opts) { - var { - key, - secretKey, - metaPath, - userSettings - } = opts - var logDetails = {key: key.toString('hex')} - - // create the archive instance - logger.verbose('Loading archive', {details: logDetails}) - var archive = hyperdrive(datStorage.create(metaPath), key, { - sparse: true, - secretKey - // metadataStorageCacheSize: 0, - // contentStorageCacheSize: 0, - // treeCacheSize: 2048 - }) - archive.on('error', err => { - let k = key.toString('hex') - log(k, {event: 'archive-error', message: err.toString()}, 'warn') - console.error('Error in archive', k, err) - }) - archive.metadata.on('peer-add', () => onNetworkChanged(archive)) - archive.metadata.on('peer-remove', () => onNetworkChanged(archive)) - archive.networkStats = networkSpeed(archive) - archive.replicationStreams = [] // list of all active replication streams - archive.peerHistory = [] // samples of the peer count - - // wait for ready - await new Promise((resolve, reject) => { - archive.ready(err => { - if (err) reject(err) - else resolve() - }) - }) - logger.silly('Archive ready', {details: {key: logDetails}}) - await updateSizeTracking(archive) - - // attach extensions - datExtensions.attach(archive) - - // store in the discovery listing, so the swarmer can find it - // but not yet in the regular archives listing, because it's not fully loaded - var discoveryKey = datEncoding.toStr(archive.discoveryKey) - archivesByDKey[discoveryKey] = archive - - // setup the archive based on current settings - configureNetwork(archive, userSettings) - configureAutoDownload(archive, userSettings) - configureLocalSync(archive, userSettings) - - // await initial metadata sync if not the owner - if (!archive.writable && !archive.metadata.length) { - // wait to receive a first update - await new Promise((resolve, reject) => { - archive.metadata.update(err => { - if (err) reject(err) - else resolve() - }) - }) - } - if (!archive.writable) { - // always download all metadata - archive.metadata.download({start: 0, end: -1}) - } - - // watch for sync events - archive.fileActStream = pda.watch(archive) - 
archive.fileActStream.on('data', ([event, {path}]) => { - if (event === 'changed') { - if (!archive.localSyncSettings) return - // need to sync this change to the local folder - if (archive.localSyncSettings.autoPublish) { - // bidirectional sync: use the sync queue - folderSync.queueSyncEvent(archive, {toFolder: true}) - } else { - // preview mode: just write this update to disk - folderSync.syncArchiveToFolder(archive, {paths: [path], shallow: false}) - } - } - }) - - // store in the archives list - archives[datEncoding.toStr(archive.key)] = archive - - // return some archive info - return {discoveryKey, writable: archive.writable} - }, - - async unloadArchive (key) { - const archive = archives[key] - if (!archive) { - return - } - logger.verbose('Unloading archive', {details: {key}}) - - // shutdown archive - leaveSwarm(key) - stopAutodownload(archive) - if (archive.fileActStream) { - archive.fileActStream.end() - archive.fileActStream = null - } - datExtensions.detach(archive) - await new Promise((resolve, reject) => { - archive.close(err => { - if (err) reject(err) - else resolve() - }) - }) - delete archivesByDKey[datEncoding.toStr(archive.discoveryKey)] - delete archives[key] - }, - - // archive methods - // = - - callArchiveAsyncMethod (key, version, method, ...args) { - var checkout = getArchiveCheckout(key, version) - checkout[method](...args) - }, - - callArchiveReadStreamMethod (key, version, method, ...args) { - var checkout = getArchiveCheckout(key, version) - return checkout[method](...args) - }, - - callArchiveWriteStreamMethod (key, version, method, ...args) { - var checkout = getArchiveCheckout(key, version) - return checkout[method](...args) - }, - - callArchivePDAPromiseMethod (key, version, method, ...args) { - var checkout = getArchiveCheckout(key, version) - return pda[method](checkout, ...args) - }, - - callArchivePDAReadStreamMethod (key, version, method, ...args) { - var checkout = getArchiveCheckout(key, version) - return pda[method](checkout, ...args) - }, - - async clearFileCache (key, userSettings) { - var archive = await getArchive(key) - if (!archive || archive.writable) { - return // abort, only clear the content cache of downloaded archives - } - logger.info('Clearing archive file cache', {details: {key: key.toString('hex')}}) - - // clear the cache - await new Promise((resolve, reject) => { - archive.content.clear(0, archive.content.length, err => { - if (err) reject(err) - else resolve() - }) - }) - - // force a reconfig of the autodownloader - stopAutodownload(archive) - configureAutoDownload(archive, userSettings) - }, - - async exportFilesystemToArchive (opts) { - opts.dstArchive = getArchive(opts.dstArchive) - return pda.exportFilesystemToArchive(opts) - }, - - async exportArchiveToFilesystem (opts) { - opts.srcArchive = getArchive(opts.srcArchive) - return pda.exportFilesystemToArchive(opts) - }, - - async exportArchiveToArchive (opts) { - opts.srcArchive = getArchive(opts.srcArchive) - opts.dstArchive = getArchive(opts.dstArchive) - return pda.exportArchiveToArchive(opts) - }, - - // folder sync - // = - - fs_assertSafePath: folderSync.assertSafePath, - fs_ensureSyncFinished: key => folderSync.ensureSyncFinished(getArchive(key)), - fs_diffListing: (key, ...args) => folderSync.diffListing(getArchive(key), ...args), - fs_diffFile: (key, ...args) => folderSync.diffFile(getArchive(key), ...args), - fe_queueSyncEvent: (key, ...args) => folderSync.queueSyncEvent(getArchive(key), ...args), - fs_syncFolderToArchive: (key, ...args) => 
folderSync.syncFolderToArchive(getArchive(key), ...args), - fs_syncArchiveToFolder: (key, ...args) => folderSync.syncArchiveToFolder(getArchive(key), ...args), - - // dat extensions - // = - - ext_listPeers: async (key, ...args) => datExtensions.listPeers(getArchive(key), ...args), - ext_getPeer: async (key, ...args) => datExtensions.getPeer(getArchive(key), ...args), - ext_getOwnPeerId: () => datEncoding.toStr(networkId), - ext_broadcastEphemeralMessage: async (key, ...args) => datExtensions.broadcastEphemeralMessage(getArchive(key), ...args), - ext_sendEphemeralMessage: async (key, ...args) => datExtensions.sendEphemeralMessage(getArchive(key), ...args), - ext_getSessionData: async (key, ...args) => datExtensions.getSessionData(getArchive(key), ...args), - ext_setSessionData: async (key, ...args) => datExtensions.setSessionData(getArchive(key), ...args), - ext_createDatPeersStream: async (key, ...args) => datExtensions.createDatPeersStream(getArchive(key), ...args) -} - -// archive networking -// = - -// set the networking of an archive based on settings -function configureNetwork (archive, settings) { - if (!settings || settings.networked) { - joinSwarm(archive) - } else { - leaveSwarm(archive) - } -} - -// put the archive into the network, for upload and download -const joinSwarm = exports.joinSwarm = function joinSwarm (key, opts) { - var archive = (typeof key === 'object' && key.key) ? key : getArchive(key) - if (!archive || archive.isSwarming) return - archiveSwarm.join(archive.discoveryKey) - var keyStr = datEncoding.toStr(archive.key) - log(keyStr, { - event: 'swarming', - discoveryKey: datEncoding.toStr(archive.discoveryKey) - }) - archive.isSwarming = true -} - -// take the archive out of the network -const leaveSwarm = exports.leaveSwarm = function leaveSwarm (key) { - var archive = (typeof key === 'object' && key.discoveryKey) ? 
key : getArchive(key) - if (!archive || !archive.isSwarming) return - - var keyStr = datEncoding.toStr(archive.key) - log(keyStr, { - event: 'unswarming', - message: `Disconnected ${archive.metadata.peers.length} peers` - }) - - archive.replicationStreams.forEach(stream => stream.destroy()) // stop all active replications - archive.replicationStreams.length = 0 - archiveSwarm.leave(archive.discoveryKey) - archive.isSwarming = false -} - -// internal methods -// = - -function getArchive (key) { - if (key instanceof hyperdrive) return key - if (key.key) key = key.key - return archives[datEncoding.toStr(key)] -} - -function getArchiveCheckout (key, version) { - var archive = getArchive(key) - var checkoutFS = archive - if (version) { - let seq = parseInt(version) - if (Number.isNaN(seq)) { - if (version === 'latest') { - // ignore, we use latest by default - } else if (version === 'preview') { - if (archive.localSyncSettings) { - // checkout local sync path - checkoutFS = scopedFSes.get(archive.localSyncSettings.path) - checkoutFS.setFilter(p => folderSync.applyDatIgnoreFilter(archive, p)) - } else { - let err = new Error('Preview mode is not enabled for this dat') - err.noPreviewMode = true - throw err - } - } else { - throw new Error('Invalid version identifier:' + version) - } - } else { - if (seq <= 0) throw new Error('Version too low') - if (seq > archive.version) throw new Error('Version too high') - checkoutFS = archive.checkout(seq, {metadataStorageCacheSize: 0, contentStorageCacheSize: 0, treeCacheSize: 0}) - } - } - return checkoutFS -} - -async function updateSizeTracking (archive) { - archive = getArchive(archive) - try { - archive.size = await pda.readSize(archive, '/') - } catch (e) { - archive.size = 0 - } - return archive.size -} - -function configureAutoDownload (archive, userSettings) { - if (archive.writable) { - return // abort, only used for unwritable - } - // HACK - // mafintosh is planning to put APIs for this inside of hyperdrive - // till then, we'll do our own inefficient downloader - // -prf - const isAutoDownloading = userSettings.isSaved && userSettings.autoDownload - if (!archive._autodownloader && isAutoDownloading) { - // setup the autodownload - archive._autodownloader = { - undownloadAll: () => { - if (archive.content) { - archive.content._selections.forEach(range => archive.content.undownload(range)) - } - }, - onUpdate: throttle(() => { - // cancel ALL previous, then prioritize ALL current - archive._autodownloader.undownloadAll() - pda.download(archive, '/').catch(e => { /* ignore cancels */ }) - }, 5e3) - } - archive.metadata.on('download', archive._autodownloader.onUpdate) - pda.download(archive, '/').catch(e => { /* ignore cancels */ }) - } else if (archive._autodownloader && !isAutoDownloading) { - stopAutodownload(archive) - } -} - -function configureLocalSync (archive, userSettings) { - var oldLocalSyncSettings = archive.localSyncSettings - archive.localSyncSettings = getLocalSyncSettings(archive, userSettings) - - if (!isEqual(archive.localSyncSettings, oldLocalSyncSettings)) { - // configure the local folder watcher if a change occurred - folderSync.configureFolderToArchiveWatcher(archive) - } - - if (!archive.localSyncSettings || !archive.localSyncSettings.isUsingInternal) { - // clear the internal directory if it's not in use - jetpack.removeAsync(getInternalLocalSyncPath(archive)) - } -} - -function getLocalSyncSettings (archive, userSettings) { - if (!archive.writable || !userSettings.isSaved) { - return false - } - if 
(userSettings.localSyncPath) { - return { - path: userSettings.localSyncPath, - autoPublish: !userSettings.previewMode - } - } - if (userSettings.previewMode) { - return { - path: getInternalLocalSyncPath(archive), - autoPublish: false, - isUsingInternal: true - } - } - return false -} - -function stopAutodownload (archive) { - if (archive._autodownloader) { - archive._autodownloader.undownloadAll() - archive.metadata.removeListener('download', archive._autodownloader.onUpdate) - archive._autodownloader = null - } -} - -function connectReplicationStream (local, remote) { - var streams = [local, remote, local] - if (upThrottleGroup) streams.splice(1, 0, upThrottleGroup.throttle()) - if (downThrottleGroup) streams.splice(-1, 0, downThrottleGroup.throttle()) - pump(streams) -} - -function createReplicationStream (info) { - // create the protocol stream - var streamKeys = [] // list of keys replicated over the streamd - var stream = hypercoreProtocol({ - id: networkId, - live: true, - encrypt: true, - extensions: ['ephemeral', 'session-data'] - }) - stream.peerInfo = info - - // add the archive if the discovery network gave us any info - if (info.channel) { - add(info.channel) - } - - // add any requested archives - stream.on('feed', add) - - function add (dkey) { - // lookup the archive - var dkeyStr = datEncoding.toStr(dkey) - var archive = archivesByDKey[dkeyStr] - if (!archive || !archive.isSwarming) { - return - } - if (archive.replicationStreams.indexOf(stream) !== -1) { - return // already replicating - } - - // create the replication stream - archive.replicate({stream, live: true}) - if (stream.destroyed) return // in case the stream was destroyed during setup - - // track the stream - var keyStr = datEncoding.toStr(archive.key) - streamKeys.push(keyStr) - archive.replicationStreams.push(stream) - function onend () { - archive.replicationStreams = archive.replicationStreams.filter(s => (s !== stream)) - } - stream.once('error', onend) - stream.once('end', onend) - stream.once('finish', onend) - stream.once('close', onend) - } - - // debugging - stream.on('error', err => { - log(streamKeys, { - event: 'connection-error', - peer: `${info.host}:${info.port}`, - connectionType: info.type, - message: err.toString() - }, 'warn') - }) - - return stream -} - -function onNetworkChanged (archive) { - var now = Date.now() - var lastHistory = archive.peerHistory.slice(-1)[0] - if (lastHistory && (now - lastHistory.ts) < 10e3) { - // if the last datapoint was < 10s ago, just update it - lastHistory.peers = archive.metadata.peers.length - } else { - archive.peerHistory.push({ - ts: Date.now(), - peers: archive.metadata.peers.length - }) - } - - // keep peerHistory from getting too long - if (archive.peerHistory.length >= 500) { - // downsize to 360 points, which at 10s intervals covers one hour - archive.peerHistory = archive.peerHistory.slice(archive.peerHistory.length - 360) - } - - // count # of peers - var totalPeerCount = 0 - for (var k in archives) { - totalPeerCount += archives[k].metadata.peers.length - } - - daemonEvents.emit('network-changed', { - details: { - url: `dat://${datEncoding.toStr(archive.key)}`, - peers: getArchivePeerInfos(archive), - connections: archive.metadata.peers.length, - totalPeerCount - } - }) -} - -function getArchivePeerInfos (archive) { - // old way, more accurate? 
- // archive.replicationStreams.map(s => ({host: s.peerInfo.host, port: s.peerInfo.port})) - - return archive.metadata.peers.map(peer => peer.stream.stream.peerInfo).filter(Boolean) -} - -function getInternalLocalSyncPath (archiveOrKey) { - var key = datEncoding.toStr(archiveOrKey.key || archiveOrKey) - return join(datPath, 'Archives', 'LocalCopy', key.slice(0, 2), key.slice(2)) -} - -// helpers -// = - -function log (key, data, logLevel = false) { - var keys = Array.isArray(key) ? key : [key] - keys.forEach(k => { - let data2 = Object.assign(data, {archiveKey: k}) - debugEvents.emit(k, data2) - debugEvents.emit('all', data2) - }) - if (keys[0]) { - debugLogFile.append(keys[0] + JSON.stringify(data) + '\n') - } - if (logLevel) { - let message = data.event + (data.message ? `: ${data.message}` : '') - logger.log(logLevel, message, {details: {key, peer: data.peer}}) - } -} \ No newline at end of file diff --git a/dat/daemon/logger.js b/dat/daemon/logger.js deleted file mode 100644 index e5b07c5e..00000000 --- a/dat/daemon/logger.js +++ /dev/null @@ -1,32 +0,0 @@ -/** - * This logger is just an event-emitter wrapper which streams to the main process. - * The main process then folds the events into the main logger. - */ - -const Emitter = require('events') - -// globals -// = - -const events = new Emitter() - -// exported api -// = - -exports.events = events - -exports.child = (meta = {}) => { - const log = (level, message, etc = {}) => { - Object.assign(etc, meta) - events.emit('log', {level, message, etc}) - } - return { - log, - error: (...args) => log('error', ...args), - warn: (...args) => log('warn', ...args), - info: (...args) => log('info', ...args), - verbose: (...args) => log('verbose', ...args), - debug: (...args) => log('debug', ...args), - silly: (...args) => log('silly', ...args) - } -} \ No newline at end of file diff --git a/dat/daemon/logging-utils.js b/dat/daemon/logging-utils.js deleted file mode 100644 index df028362..00000000 --- a/dat/daemon/logging-utils.js +++ /dev/null @@ -1,232 +0,0 @@ -const datEncoding = require('dat-encoding') - -const findFullDiscoveryKey = exports.findFullDiscoveryKey = function (archivesByDKey, key) { - key = Buffer.isBuffer(key) ? 
key.toString('hex') : key - // HACK - // if the key is short, try to find the full thing in our list - // (this shouldnt be needed once discovery stops truncating keys) - // -prf - if (key && key.length === 40) { - let dKeys = Object.keys(archivesByDKey) - for (let i = 0; i < dKeys.length; i++) { - if (dKeys[i].startsWith(key)) { - return dKeys[i] - } - } - } - return key -} - -const getDNSMessageDiscoveryKey = exports.getDNSMessageDiscoveryKey = function (archivesByDKey, msg) { - var key - function check (obj) { - if (!key && obj.name.endsWith('.dat.local')) { - key = findFullDiscoveryKey(archivesByDKey, obj.name.slice(0, -10)) - } - } - if (msg.questions) msg.questions.forEach(check) - if (msg.answers) msg.answers.forEach(check) - if (msg.additionals) msg.additionals.forEach(check) - return key || '' -} - -function has (str, v) { - return str.indexOf(v) !== -1 -} - -const addArchiveSwarmLogging = exports.addArchiveSwarmLogging = function ({archivesByDKey, log, archiveSwarm}) { - archiveSwarm.on('listening', () => { - archiveSwarm._discovery.dns.on('traffic', (type, details) => { - let archive = archivesByDKey[getDNSMessageDiscoveryKey(archivesByDKey, details.message)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'traffic', - trafficType: type, - messageId: details.message.id, - message: renderDNSTraffic(details.message), - peer: details.peer ? `${details.peer.address || details.peer.host}:${details.peer.port}` : undefined - }) - }) - }) - archiveSwarm.on('peer', (peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'peer-found', - peer: `${peer.address || peer.host}:${peer.port}` - }, 'silly') - }) - archiveSwarm.on('peer-banned', (peer, details) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'peer-banned', - peer: `${peer.address || peer.host}:${peer.port}`, - message: peerBannedReason(details.reason) - }, 'info') - }) - archiveSwarm.on('peer-rejected', (peer, details) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'peer-rejected', - peer: `${peer.address || peer.host}:${peer.port}`, - message: peerRejectedReason(details.reason) - }, 'silly') - }) - archiveSwarm.on('drop', (peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'peer-dropped', - peer: `${peer.address || peer.host}:${peer.port}`, - message: 'Too many failed connection attempts' - }, 'silly') - }) - archiveSwarm.on('connecting', (peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'connecting', - peer: `${peer.address || peer.host}:${peer.port}` - }, 'debug') - }) - archiveSwarm.on('connect-failed', (peer, details) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'connect-failed', - peer: `${peer.address || peer.host}:${peer.port}`, - message: connectFailedMessage(details) - }, 'debug') - }) - archiveSwarm.on('handshaking', (conn, peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if 
(!archive) return - log(datEncoding.toStr(archive.key), { - event: 'handshaking', - peer: `${peer.address || peer.host}:${peer.port}`, - connectionId: conn._debugId, - connectionType: peer.type, - ts: 0 - }, 'silly') - }) - archiveSwarm.on('handshake-timeout', (conn, peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'handshake-timeout', - peer: `${peer.address || peer.host}:${peer.port}`, - connectionId: conn._debugId, - connectionType: peer.type, - ts: Date.now() - conn._debugStartTime - }, 'silly') - }) - archiveSwarm.on('connection', (conn, peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'connection-established', - peer: `${peer.address || peer.host}:${peer.port}`, - connectionId: conn._debugId, - connectionType: peer.type, - ts: Date.now() - conn._debugStartTime, - message: 'Starting replication' - }, 'debug') - }) - archiveSwarm.on('redundant-connection', (conn, peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'redundant-connection', - peer: `${peer.address || peer.host}:${peer.port}`, - connectionId: conn._debugId, - connectionType: peer.type, - ts: Date.now() - conn._debugStartTime - }, 'silly') - }) - archiveSwarm.on('connection-closed', (conn, peer) => { - let archive = archivesByDKey[findFullDiscoveryKey(archivesByDKey, peer.channel)] - if (!archive) return - log(datEncoding.toStr(archive.key), { - event: 'connection-closed', - peer: `${peer.address || peer.host}:${peer.port}`, - connectionId: conn._debugId, - connectionType: peer.type, - ts: Date.now() - conn._debugStartTime - }, 'debug') - }) -} - -const renderDNSTraffic = exports.renderDNSTraffic = function ({questions, answers, additionals}) { - var messageParts = [] - if (questions && (!answers || !answers.length) && (!additionals || !additionals.length)) { - questions.forEach(q => { - if (q.type === 'TXT') { - messageParts.push('TXT Question (requesting peers list)') - } else { - messageParts.push(q.type + ' Question') - } - }) - } - if (answers) { - answers.forEach(a => { - if (a.type === 'TXT' && a.data) { - let data = a.data.toString() - if (has(data, 'host') && has(data, 'token')) { - messageParts.push('TXT Answer (heres a session token)') - } else if (has(data, 'peers')) { - messageParts.push('TXT Answer (heres a peers list)') - } else if (has(data, 'token')) { - messageParts.push('TXT Answer (no peers found)') - } else { - messageParts.push('TXT Answer') - } - } else { - messageParts.push(a.type + ' Answer') - } - }) - } - if (additionals) { - additionals.forEach(a => { - if (a.type === 'TXT' && a.data) { - let data = a.data.toString() - if (has(data, 'announce')) { - messageParts.push('TXT Additional (announcing self)') - } else if (has(data, 'unannounce')) { - messageParts.push('TXT Additional (unannouncing self)') - } else if (has(data, 'subscribe')) { - messageParts.push('TXT Additional (subscribing)') - } else { - messageParts.push('TXT Additional') - } - } else if (a.type === 'SRV' && a.data) { - messageParts.push('SRV Additional (pushed announcement)') - } else { - messageParts.push(a.type + ' Additional') - } - }) - } - return messageParts.join(', ') -} - -function connectFailedMessage (details) { - if (details.timedout) return 'Timed out' -} - -function 
peerBannedReason (reason) { - switch (reason) { - case 'detected-self': return 'Detected that the peer is this process' - case 'application': return 'Peer was removed by the application' - } - return '' -} - -function peerRejectedReason (reason) { - switch (reason) { - case 'whitelist': return 'Peer was not on the whitelist' - case 'banned': return 'Peer is on the ban list' - case 'duplicate': return 'Peer was a duplicate (already being handled)' - } - return '' -} diff --git a/dat/daemon/manifest.js b/dat/daemon/manifest.js deleted file mode 100644 index 09a78f27..00000000 --- a/dat/daemon/manifest.js +++ /dev/null @@ -1,153 +0,0 @@ -/** - * @typedef {import('../../dbs/archives').LibraryArchiveUserSettings} LibraryArchiveUserSettings - * - * @typedef {Object} DatDaemon - * @prop {function(DatDaemonSetupOpts): Promise} setup - * @prop {function(DatDaemonThrottleOpts): Promise} setBandwidthThrottle - * @prop {function(): NodeJS.ReadableStream} createLogStream - * @prop {function(): NodeJS.ReadableStream} createEventStream - * @prop {function(): NodeJS.ReadableStream} createDebugStream - * @prop {function(string): Promise} getDebugLog - * @prop {function(string | Buffer, LibraryArchiveUserSettings): Promise} configureArchive - * @prop {function(string | Buffer): Promise} getArchiveInfo - * @prop {function(string | Buffer): Promise} getArchiveNetworkStats - * @prop {function(string | Buffer): Promise} updateSizeTracking - * @prop {function(DatDaemonLoadArchiveOpts): Promise} loadArchive - * @prop {function(string): Promise} unloadArchive - * @prop {function(any=, ...any=): void} callArchiveAsyncMethod - * @prop {function(any=, ...any=): NodeJS.ReadableStream} callArchiveReadStreamMethod - * @prop {function(any=, ...any=): NodeJS.WritableStream} callArchiveWriteStreamMethod - * @prop {function(any=, ...any=): Promise} callArchivePDAPromiseMethod - * @prop {function(any=, ...any=): NodeJS.ReadableStream} callArchivePDAReadStreamMethod - * @prop {function(string | Buffer, LibraryArchiveUserSettings): Promise} clearFileCache - * @prop {function(Object): Promise} exportFilesystemToArchive - * @prop {function(Object): Promise} exportArchiveToFilesystem - * @prop {function(Object): Promise} exportArchiveToArchive - * @prop {function(string): Promise} fs_assertSafePath - * @prop {function(string | Buffer): Promise} fs_ensureSyncFinished - * @prop {function(string | Buffer, [DatDaemonFSDiffListingOpts]): Promise} fs_diffListing - * @prop {function(string | Buffer, string): Promise} fs_diffFile - * @prop {function(string | Buffer, DatDaemonFSQueueSyncEventOpts): Promise} fe_queueSyncEvent - * @prop {function(string | Buffer, [DatDaemonFSDiffListingOpts]): Promise} fs_syncFolderToArchive - * @prop {function(string | Buffer, [DatDaemonFSDiffListingOpts]): Promise} fs_syncArchiveToFolder - * @prop {function(any=, ...any=): Promise} ext_listPeers - * @prop {function(any=, ...any=): Promise} ext_getPeer - * @prop {function(any=, ...any=): Promise} ext_broadcastEphemeralMessage - * @prop {function(any=, ...any=): Promise} ext_sendEphemeralMessage - * @prop {function(any=, ...any=): Promise} ext_getSessionData - * @prop {function(any=, ...any=): Promise} ext_setSessionData - * @prop {function(any=, ...any=): NodeJS.ReadableStream} ext_createDatPeersStream - * NOTE: the ext_* methods are temporary so Im not going to bother documenting their types - * - * @typedef {Object} DatDaemonSetupOpts - * @prop {string} datPath - * @prop {string[]} disallowedSavePaths - * - * @typedef {Object} DatDaemonThrottleOpts - * 
@prop {number} [up] - * @prop {number} [down] - * - * @typedef {Object} DatDaemonLoadArchiveOpts - * @prop {string | Buffer} key - * @prop {Buffer} [secretKey] - * @prop {string} metaPath - * @prop {LibraryArchiveUserSettings} userSettings - * - * @typedef {Object} DatDaemonFSDiffListingOpts - * @prop {boolean} [shallow] - Dont descend into changed folders (default true) - * @prop {boolean} [compareContent] - Compare the actual content (default true) - * @prop {string[]} [paths] - A whitelist of files to compare - * @prop {string} [localSyncPath] - Override the archive localSyncPath - * @prop {boolean} [addOnly] - Dont modify or remove any files (default false) - * - * @typedef {Object} DatDaemonFSQueueSyncEventOpts - * @prop {boolean} toFolder - * @prop {boolean} toArchive - * - * @typedef {Object} DatDaemonLoadedArchiveInfo - * @prop {Buffer} discoveryKey - * @prop {boolean} writable - * - * @typedef {never} DatDaemonPeerInfo - * TODO- what's in here? - * - * @typedef {Object} DatDaemonPeerHistory - * @prop {number} ts - * @prop {number} peers - * - * @typedef {Object} DatDaemonNetworkStats - * @prop {number} downloadSpeed - * @prop {number} uploadSpeed - * @prop {number} downloadTotal - * @prop {number} uploadTotal - * - * @typedef {Object} DatDaemonArchiveInfo - * @prop {number} version - * @prop {number} size - * @prop {number} peers - * @prop {DatDaemonPeerInfo[]} peerInfo - * @prop {DatDaemonPeerHistory[]} peerHistory - * @prop {DatDaemonNetworkStats} networkStats - * - * @typedef {never} DatDaemonFSListingDiff - * TODO - what's in here? - * - * @typedef {never} DatDaemonFSFileDiff - * TODO - what's in here? - */ - -module.exports = { - // setup & config - - setup: 'promise', - setBandwidthThrottle: 'promise', - - // event streams & debug - - createLogStream: 'readable', - createEventStream: 'readable', - createDebugStream: 'readable', - getDebugLog: 'promise', - - // archive management - - configureArchive: 'promise', - getArchiveInfo: 'promise', - getArchiveNetworkStats: 'promise', - updateSizeTracking: 'promise', - loadArchive: 'promise', - unloadArchive: 'promise', - - // archive methods - - callArchiveAsyncMethod: 'async', - callArchiveReadStreamMethod: 'readable', - callArchiveWriteStreamMethod: 'writable', - callArchivePDAPromiseMethod: 'promise', - callArchivePDAReadStreamMethod: 'readable', - clearFileCache: 'promise', - exportFilesystemToArchive: 'async', - exportArchiveToFilesystem: 'async', - exportArchiveToArchive: 'async', - - // folder sync - - fs_assertSafePath: 'promise', - fs_ensureSyncFinished: 'promise', - fs_diffListing: 'promise', - fs_diffFile: 'promise', - fe_queueSyncEvent: 'promise', - fs_syncFolderToArchive: 'promise', - fs_syncArchiveToFolder: 'promise', - - // dat extensions - - ext_listPeers: 'promise', - ext_getPeer: 'promise', - ext_getOwnPeerId: 'promise', - ext_broadcastEphemeralMessage: 'promise', - ext_sendEphemeralMessage: 'promise', - ext_getSessionData: 'promise', - ext_setSessionData: 'promise', - ext_createDatPeersStream: 'readable' -} diff --git a/dat/daemon/storage.js b/dat/daemon/storage.js deleted file mode 100644 index a2215846..00000000 --- a/dat/daemon/storage.js +++ /dev/null @@ -1,53 +0,0 @@ -const path = require('path') -const fs = require('fs') -const detectSparseFiles = require('supports-sparse-files') -const raf = require('random-access-file') -const raif = require('random-access-indexed-file') -const logger = require('./logger').child({category: 'dat', subcategory: 'storage'}) - -// globals -// = - -const LARGE_FILES = ['data', 
'signatures'] -const INDEX_BLOCK_SIZE = { - data: 1024 * 1024, // 1mb - signatures: 1024 // 1kb -} -var supportsSparseFiles = false - -// exported api -// = - -exports.setup = async function () { - await new Promise((resolve) => { - detectSparseFiles(function (err, yes) { - supportsSparseFiles = yes - if (!yes) { - logger.info('Sparse-file support not detected. Falling back to indexed data files.') - } - resolve() - }) - }) -} - -function createStorage (folder, subfolder) { - return function (name) { - var filepath = path.join(folder, subfolder, name) - if (fs.existsSync(filepath + '.index')) { - // use random-access-indexed-file because that's what has been used - return raif(filepath, {blockSize: INDEX_BLOCK_SIZE[name]}) - } - if (!supportsSparseFiles && LARGE_FILES.includes(name)) { - // use random-access-indexed-file because sparse-files are not supported and this file tends to get big - return raif(filepath, {blockSize: INDEX_BLOCK_SIZE[name]}) - } - return raf(filepath) - } -} - -exports.create = function (folder) { - return { - metadata: createStorage(folder, 'metadata'), - content: createStorage(folder, 'content') - } -} \ No newline at end of file diff --git a/dat/debugging.js b/dat/debugging.js index b7dc0351..7572fad8 100644 --- a/dat/debugging.js +++ b/dat/debugging.js @@ -1,4 +1,4 @@ -const {getActiveArchives} = require('./library') +const {getActiveArchives} = require('./archives') const datDns = require('./dns') /** @@ -17,9 +17,6 @@ exports.archivesDebugPage = function () { Content DKey${a.content.discoveryKey.toString('hex')} Meta Key${a.key.toString('hex')} Content Key${a.content.key.toString('hex')} - ${a.replicationStreams.map((s, i) => ` - Peer ${i}${s.peerInfo.type} ${s.peerInfo.host}:${s.peerInfo.port} - `).join('')} ` }).join('')} diff --git a/dat/directory-listing-page.js b/dat/directory-listing-page.js deleted file mode 100644 index 18a75881..00000000 --- a/dat/directory-listing-page.js +++ /dev/null @@ -1,83 +0,0 @@ -const {pluralize, makeSafe} = require('../lib/strings') -const {stat, readdir} = require('pauls-dat-api') -const {join, relative} = require('path') - -/** @typedef {import('./library').InternalDatArchive} InternalDatArchive */ - -const styles = `` - -/** - * @prop {InternalDatArchive} archive - * @prop {string} dirPath - * @prop {string} webRoot - * @returns {Promise} - */ -module.exports = async function renderDirectoryListingPage (archive, dirPath, webRoot) { - // handle the webroot - webRoot = webRoot || '/' - const realPath = p => join(webRoot, p) - const webrootPath = p => relative(webRoot, p) - - // list files - var names = [] - try { names = await readdir(archive, realPath(dirPath)) } catch (e) {} - - // stat each file - var entries = /** @type any[] */(await Promise.all(names.map(async (name) => { - var entry - var entryPath = join(dirPath, name) - try { entry = await stat(archive, realPath(entryPath)) } catch (e) { return false } - entry.path = webrootPath(entryPath) - entry.name = name - return entry - }))) - entries = entries.filter(Boolean) - - // sort the listing - entries.sort((a, b) => { - // directories on top - if (a.isDirectory() && !b.isDirectory()) return -1 - if (!a.isDirectory() && b.isDirectory()) return 1 - // alphabetical after that - return a.name.localeCompare(b.name) - }) - - // show the updog if path is not top - var updog = '' - if (['/', '', '..'].includes(webrootPath(dirPath)) === false) { - updog = `
..
` - } - - // render entries - var totalFiles = 0 - var entriesStr = entries.map(entry => { - totalFiles++ - var url = makeSafe(entry.path) - if (!url.startsWith('/')) url = '/' + url // all urls should have a leading slash - if (entry.isDirectory() && !url.endsWith('/')) url += '/' // all dirs should have a trailing slash - var type = entry.isDirectory() ? 'directory' : 'file' - return `
${makeSafe(entry.name)}
` - }).join('') - - // render summary - var summary = `
${totalFiles} ${pluralize(totalFiles, 'file')}
` - - // render final - return '' + styles + updog + entriesStr + summary -} diff --git a/dat/dns.js b/dat/dns.js index b75e6f57..3461c076 100644 --- a/dat/dns.js +++ b/dat/dns.js @@ -1,7 +1,7 @@ const parseDatURL = require('parse-dat-url') const {InvalidDomainName} = require('beaker-error-constants') const datDnsDb = require('../dbs/dat-dns') -const library = require('./library') +const archives = require('./archives') const {DAT_HASH_REGEX} = require('../lib/const') const logger = require('../logger').child({category: 'dat', subcategory: 'dns'}) @@ -40,5 +40,5 @@ async function read (name, err) { } async function write (name, key) { if (DAT_HASH_REGEX.test(name)) return // dont write for raw urls - await library.confirmDomain(key) + await archives.confirmDomain(key) } diff --git a/dat/garbage-collector.js b/dat/garbage-collector.js deleted file mode 100644 index 1c28db8e..00000000 --- a/dat/garbage-collector.js +++ /dev/null @@ -1,93 +0,0 @@ -const ms = require('ms') -const archivesDb = require('../dbs/archives') -const datLibrary = require('./library') -const { - DAT_GC_FIRST_COLLECT_WAIT, - DAT_GC_REGULAR_COLLECT_WAIT -} = require('../lib/const') -const logger = require('../logger').child({category: 'dat', subcategory: 'garbage-collector'}) - -// typedefs -// = - -/** - * @typedef {Object} CollectResult - * @prop {number} totalBytes - * @prop {number} totalArchives - * @prop {number} skippedArchives - */ - -// globals -// = - -var nextGCTimeout - -// exported API -// = - -exports.setup = function () { - schedule(DAT_GC_FIRST_COLLECT_WAIT) -} - -/** - * @param {Object} [opts] - * @param {number} [opts.olderThan] - * @param {boolean} [opts.isOwner] - * @returns {Promise} - */ -const collect = exports.collect = async function ({olderThan, isOwner} = {}) { - logger.info('Running GC') - - // clear any scheduled GC - if (nextGCTimeout) { - clearTimeout(nextGCTimeout) - nextGCTimeout = null - } - - // run the GC - var totalBytes = 0 - var skippedArchives = 0 - var startTime = Date.now() - - // first unsave expired archives - var expiredArchives = await archivesDb.listExpiredArchives() - if (expiredArchives.length) { - logger.info(`Unsaving ${expiredArchives.length} expired archives`) - } - var promises = [] - for (let i = 0; i < expiredArchives.length; i++) { - promises.push(archivesDb.setUserSettings(0, expiredArchives[i].key, {isSaved: false})) - } - await Promise.all(promises) - - // now GC old archives - var unusedArchives = await archivesDb.listGarbageCollectableArchives({olderThan, isOwner}) - if (unusedArchives.length) { - logger.info(`Cleaning out ${unusedArchives.length} unused archives`) - logger.silly('Archives:', {urls: unusedArchives.map(a => a.key)}) - } - for (let i = 0; i < unusedArchives.length; i++) { - await datLibrary.unloadArchive(unusedArchives[i].key) - totalBytes += await archivesDb.deleteArchive(unusedArchives[i].key) - } - - logger.debug(`GC completed in ${Date.now() - startTime} ms`) - - // schedule the next GC - schedule(DAT_GC_REGULAR_COLLECT_WAIT) - logger.debug(`Scheduling next run to happen in ${ms(DAT_GC_REGULAR_COLLECT_WAIT)}`) - - // return stats - return {totalBytes, totalArchives: unusedArchives.length - skippedArchives, skippedArchives} -} - -// helpers -// = - -/** - * @param {number} time - */ -function schedule (time) { - nextGCTimeout = setTimeout(collect, time) - nextGCTimeout.unref() -} diff --git a/dat/index.js b/dat/index.js index 512518b3..53150eb9 100644 --- a/dat/index.js +++ b/dat/index.js @@ -1,9 +1,12 @@ module.exports = { + archives: 
require('./archives'), assets: require('./assets'), debug: require('./debugging'), dns: require('./dns'), - garbageCollector: require('./garbage-collector'), - library: require('./library'), protocol: require('./protocol'), - watchlist: require('./watchlist') + watchlist: require('./watchlist'), + async setup (opts) { + await this.archives.setup(opts) + await this.watchlist.setup() + } } diff --git a/dat/library.js b/dat/library.js deleted file mode 100644 index 6a656315..00000000 --- a/dat/library.js +++ /dev/null @@ -1,764 +0,0 @@ -const emitStream = require('emit-stream') -const EventEmitter = require('events') -const datEncoding = require('dat-encoding') -const pify = require('pify') -const signatures = require('sodium-signatures') -const parseDatURL = require('parse-dat-url') -const _debounce = require('lodash.debounce') -const mkdirp = require('mkdirp') -const baseLogger = require('../logger').get() -const logger = baseLogger.child({category: 'dat', subcategory: 'library'}) - -// dbs -const siteData = require('../dbs/sitedata') -const settingsDb = require('../dbs/settings') -const archivesDb = require('../dbs/archives') -const datDnsDb = require('../dbs/dat-dns') - -// dat modules -const datGC = require('./garbage-collector') -const datAssets = require('./assets') - -// constants -// = - -const { - DAT_HASH_REGEX, - DAT_PRESERVED_FIELDS_ON_FORK -} = require('../lib/const') -const {InvalidURLError, TimeoutError} = require('beaker-error-constants') -const DAT_DAEMON_MANIFEST = require('./daemon/manifest') - -// typedefs -// = - -/** - * @typedef {import('./daemon/manifest').DatDaemon} DatDaemon - * @typedef {import('../dbs/archives').LibraryArchiveRecord} LibraryArchiveRecord - * - * @typedef {Object} InternalDatArchive - * @prop {Buffer} key - * @prop {string} url - * @prop {string?} domain - * @prop {Buffer} discoveryKey - * @prop {boolean} writable - * @prop {function(Function): void} ready - * @prop {function(Object, Function=): void} download - * @prop {function(Object=): NodeJS.ReadableStream} history - * @prop {function(Object=): NodeJS.ReadableStream} createReadStream - * @prop {function(string, Object=, Function=): any} readFile - * @prop {function(number, Object=): NodeJS.ReadableStream} createDiffStream - * @prop {function(string, Object=): NodeJS.WritableStream} createWriteStream - * @prop {function(string, any, Object=, Function=): void} writeFile - * @prop {function(string, Function=): void} unlink - * @prop {function(string, Object=, Function=): void} mkdir - * @prop {function(string, Function=): void} rmdir - * @prop {function(string, Object=, Function=): void} readdir - * @prop {function(string, Object=, Function=): void} stat - * @prop {function(string, Object=, Function=): void} lstat - * @prop {function(string, Object=, Function=): void} access - * @prop {Object} pda - * @prop {function(string): Promise} pda.stat - * @prop {function(string, Object=): Promise} pda.readFile - * @prop {function(string, Object=): Promise>} pda.readdir - * @prop {function(string): Promise} pda.readSize - * @prop {function(string, any, Object=): Promise} pda.writeFile - * @prop {function(string): Promise} pda.mkdir - * @prop {function(string, string): Promise} pda.copy - * @prop {function(string, string): Promise} pda.rename - * @prop {function(string): Promise} pda.unlink - * @prop {function(string, Object=): Promise} pda.rmdir - * @prop {function(string=): Promise} pda.download - * @prop {function(string=): NodeJS.ReadableStream} pda.watch - * @prop {function(): NodeJS.ReadableStream} 
pda.createNetworkActivityStream - * @prop {function(): Promise} pda.readManifest - * @prop {function(Object): Promise} pda.writeManifest - * @prop {function(Object): Promise} pda.updateManifest - */ - -// globals -// = - -var archives = {} // in-memory cache of archive objects. key -> archive -var archiveLoadPromises = {} // key -> promise -var archivesEvents = new EventEmitter() -var daemonEvents -var daemon = /** @type DatDaemon */({}) - -// exported API -// = - -/** - * @param {Object} opts - * @param {Object} opts.rpcAPI - * @param {Object} opts.datDaemonProcess - * @param {string[]} opts.disallowedSavePaths - * @return {Promise} - */ -exports.setup = async function setup ({rpcAPI, datDaemonProcess, disallowedSavePaths}) { - // connect to the daemon - daemon = rpcAPI.importAPI('dat-daemon', DAT_DAEMON_MANIFEST, {proc: datDaemonProcess, timeout: false}) - daemon.setup({disallowedSavePaths, datPath: archivesDb.getDatPath()}) - daemonEvents = emitStream(daemon.createEventStream()) - - // pipe the log - var daemonLogEvents = emitStream(daemon.createLogStream()) - daemonLogEvents.on('log', ({level, message, etc}) => { - baseLogger.log(level, message, etc) - }) - - // wire up event handlers - archivesDb.on('update:archive-user-settings', async (key, userSettings, newUserSettings) => { - // emit event - var details = { - url: 'dat://' + key, - isSaved: userSettings.isSaved, - hidden: userSettings.hidden, - networked: userSettings.networked, - autoDownload: userSettings.autoDownload, - autoUpload: userSettings.autoUpload, - localSyncPath: userSettings.localSyncPath, - previewMode: userSettings.previewMode - } - archivesEvents.emit('updated', {details}) - if ('isSaved' in newUserSettings) { - archivesEvents.emit(newUserSettings.isSaved ? 'added' : 'removed', {details}) - } - - // delete all perms for deleted archives - if (!userSettings.isSaved) { - siteData.clearPermissionAllOrigins('modifyDat:' + key) - } - - // update the download based on these settings - daemon.configureArchive(key, userSettings) - }) - datDnsDb.on('update', ({key, name}) => { - var archive = getArchive(key) - if (archive) { - archive.domain = name - } - }) - - // re-export events - daemonEvents.on('network-changed', evt => archivesEvents.emit('network-changed', evt)) - daemonEvents.on('folder-synced', evt => archivesEvents.emit('folder-synced', evt)) - daemonEvents.on('folder-sync-error', evt => archivesEvents.emit('folder-sync-error', evt)) - - // configure the bandwidth throttle - settingsDb.getAll().then(({dat_bandwidth_limit_up, dat_bandwidth_limit_down}) => { - daemon.setBandwidthThrottle({ - up: dat_bandwidth_limit_up, - down: dat_bandwidth_limit_down - }) - }) - settingsDb.on('set:dat_bandwidth_limit_up', up => daemon.setBandwidthThrottle({up})) - settingsDb.on('set:dat_bandwidth_limit_down', down => daemon.setBandwidthThrottle({down})) - - // start the GC manager - datGC.setup() - logger.info('Initialized dat library') -} - -/** - * @returns {DatDaemon} - */ -exports.getDaemon = () => daemon - -/** - * @returns {Promise} - */ -exports.loadSavedArchives = function () { - // load and configure all saved archives - return archivesDb.query(0, {isSaved: true}).then( - async (/** @type LibraryArchiveRecord[] */archives) => { - // HACK - // load the archives one at a time and give 5 seconds between each - // why: the purpose of loading saved archives is to seed them - // loading them all at once can bog down the user's device - // if the user tries to access an archive, Beaker will load it immediately - // so spacing out 
the loads has no visible impact on the user - // (except for reducing the overall load for the user) - // -prf - for (let a of archives) { - loadArchive(a.key, a.userSettings) - await new Promise(r => setTimeout(r, 5e3)) // wait 5s - } - }, - err => console.error('Failed to load networked archives', err) - ) -} - -/** - * @returns {NodeJS.ReadableStream} - */ -exports.createEventStream = function createEventStream () { - return emitStream.toStream(archivesEvents) -} - -/** - * @param {string} key - * @returns {Promise} - */ -exports.getDebugLog = function getDebugLog (key) { - return daemon.getDebugLog(key) -} - -/** - * @returns {NodeJS.ReadableStream} - */ -exports.createDebugStream = function createDebugStream () { - return daemon.createDebugStream() -} - -// read metadata for the archive, and store it in the meta db -const pullLatestArchiveMeta = exports.pullLatestArchiveMeta = async function pullLatestArchiveMeta (archive, {updateMTime} = {}) { - try { - var key = archive.key.toString('hex') - - // ready() just in case (we need .blocks) - await pify(archive.ready.bind(archive))() - - // trigger DNS update - confirmDomain(key) - - // read the archive meta and size on disk - var [manifest, oldMeta, size] = await Promise.all([ - archive.pda.readManifest().catch(_ => {}), - archivesDb.getMeta(key), - daemon.updateSizeTracking(key) - ]) - var {title, description, type} = (manifest || {}) - var isOwner = archive.writable - var mtime = updateMTime ? Date.now() : oldMeta.mtime - - // write the record - var details = {title, description, type, mtime, size, isOwner} - await archivesDb.setMeta(key, details) - - // emit the updated event - details.url = 'dat://' + key - archivesEvents.emit('updated', {details}) - return details - } catch (e) { - console.error('Error pulling meta', e) - } -} - -// archive creation -// = - -const createNewArchive = exports.createNewArchive = async function createNewArchive (manifest = {}, settings = false) { - var userSettings = { - isSaved: !(settings && settings.isSaved === false), - networked: !(settings && settings.networked === false), - hidden: settings && settings.hidden === true, - previewMode: settings && settings.previewMode === true, - localSyncPath: settings && settings.localSyncPath - } - - // create the archive - var archive = await loadArchive(null, userSettings) - var key = datEncoding.toStr(archive.key) - - // write the manifest and default datignore - await Promise.all([ - archive.pda.writeManifest(manifest), - archive.pda.writeFile('/.datignore', await settingsDb.get('default_dat_ignore'), 'utf8') - ]) - - // write the user settings - await archivesDb.setUserSettings(0, key, userSettings) - - // write the metadata - await pullLatestArchiveMeta(archive) - - return `dat://${key}/` -} - -exports.forkArchive = async function forkArchive (srcArchiveUrl, manifest = {}, settings = undefined) { - srcArchiveUrl = fromKeyToURL(srcArchiveUrl) - - // get the source archive - var srcArchive - var downloadRes = await Promise.race([ - (async function () { - srcArchive = await getOrLoadArchive(srcArchiveUrl) - if (!srcArchive) { - throw new Error('Invalid archive key') - } - return srcArchive.pda.download('/') - })(), - new Promise(r => setTimeout(() => r('timeout'), 60e3)) - ]) - if (downloadRes === 'timeout') { - throw new TimeoutError('Timed out while downloading source archive') - } - - // fetch source archive meta - var srcManifest = await srcArchive.pda.readManifest().catch(_ => {}) - srcManifest = srcManifest || {} - - // override any manifest data - var 
dstManifest = { - title: (manifest.title) ? manifest.title : srcManifest.title, - description: (manifest.description) ? manifest.description : srcManifest.description, - type: (manifest.type) ? manifest.type : srcManifest.type, - author: manifest.author - } - DAT_PRESERVED_FIELDS_ON_FORK.forEach(field => { - if (srcManifest[field]) { - dstManifest[field] = srcManifest[field] - } - }) - - // create the new archive - var dstArchiveUrl = await createNewArchive(dstManifest, settings) - var dstArchive = getArchive(dstArchiveUrl) - - // copy files - var ignore = ['/.dat', '/.git', '/dat.json'] - await daemon.exportArchiveToArchive({ - srcArchive: datEncoding.toStr(srcArchive.key), - dstArchive: datEncoding.toStr(dstArchive.key), - skipUndownloadedFiles: true, - ignore - }) - - // write a .datignore if DNE - try { - await dstArchive.pda.stat('/.datignore') - } catch (e) { - await dstArchive.pda.writeFile('/.datignore', await settingsDb.get('default_dat_ignore'), 'utf8') - } - - return dstArchiveUrl -} - -// archive management -// = - -const loadArchive = exports.loadArchive = async function loadArchive (key, userSettings = null) { - // validate key - var secretKey - if (key) { - if (!Buffer.isBuffer(key)) { - // existing dat - key = await fromURLToKey(key, true) - if (!DAT_HASH_REGEX.test(key)) { - throw new InvalidURLError() - } - key = datEncoding.toBuf(key) - } - } else { - // new dat, generate keys - var kp = signatures.keyPair() - key = kp.publicKey - secretKey = kp.secretKey - } - - // fallback to the promise, if possible - var keyStr = datEncoding.toStr(key) - if (keyStr in archiveLoadPromises) { - return archiveLoadPromises[keyStr] - } - - // run and cache the promise - var p = loadArchiveInner(key, secretKey, userSettings) - archiveLoadPromises[keyStr] = p - p.catch(err => { - console.error('Failed to load archive', keyStr, err.toString()) - }) - - // when done, clear the promise - const clear = () => delete archiveLoadPromises[keyStr] - p.then(clear, clear) - - return p -} - -// main logic, separated out so we can capture the promise -async function loadArchiveInner (key, secretKey, userSettings = null) { - // load the user settings as needed - if (!userSettings) { - try { - userSettings = await archivesDb.getUserSettings(0, key) - } catch (e) { - userSettings = {networked: true} - } - } - if (!('networked' in userSettings)) { - userSettings.networked = true - } - - // ensure the folders exist - var metaPath = archivesDb.getArchiveMetaPath(key) - mkdirp.sync(metaPath) - - // load the archive in the daemon - var archiveInfo = await daemon.loadArchive({ - key, - secretKey, - metaPath, - userSettings - }) - - // create the archive proxy instance - var archive = createArchiveProxy(key, undefined, archiveInfo) - - // fetch dns name if known - let dnsRecord = await datDnsDb.getCurrentByKey(datEncoding.toStr(key)) - archive.domain = dnsRecord ? 
dnsRecord.name : undefined - - // update db - archivesDb.touch(key).catch(err => console.error('Failed to update lastAccessTime for archive', key, err)) - await pullLatestArchiveMeta(archive) - datAssets.update(archive) - - // wire up events - archive.pullLatestArchiveMeta = _debounce(opts => pullLatestArchiveMeta(archive, opts), 1e3) - archive.fileActStream = archive.pda.watch() - archive.fileActStream.on('data', ([event, {path}]) => { - if (event === 'changed') { - archive.pullLatestArchiveMeta({updateMTime: true}) - datAssets.update(archive, [path]) - } - }) - - // now store in main archives listing, as loaded - archives[datEncoding.toStr(archive.key)] = archive - return archive -} - -const getArchive = exports.getArchive = function getArchive (key) { - key = fromURLToKey(key) - return archives[key] -} - -exports.getArchiveCheckout = function getArchiveCheckout (archive, version) { - var isHistoric = false - var isPreview = false - var checkoutFS = archive - if (version) { - let seq = parseInt(version) - if (Number.isNaN(seq)) { - if (version === 'latest') { - // ignore, we use latest by default - } else if (version === 'preview') { - isPreview = true - checkoutFS = createArchiveProxy(archive.key, 'preview', archive) - checkoutFS.domain = archive.domain - } else { - throw new Error('Invalid version identifier:' + version) - } - } else { - checkoutFS = createArchiveProxy(archive.key, version, archive) - checkoutFS.domain = archive.domain - isHistoric = true - } - } - return {isHistoric, isPreview, checkoutFS} -} - -exports.getActiveArchives = function getActiveArchives () { - return archives -} - -const getOrLoadArchive = exports.getOrLoadArchive = async function getOrLoadArchive (key, opts) { - key = await fromURLToKey(key, true) - var archive = getArchive(key) - if (archive) { - return archive - } - return loadArchive(key, opts) -} - -exports.unloadArchive = async function unloadArchive (key) { - key = await fromURLToKey(key, true) - var archive = archives[key] - if (!archive) return - if (archive.fileActStream) { - archive.fileActStream.close() - archive.fileActStream = null - } - delete archives[key] - await daemon.unloadArchive(key) -} - -const isArchiveLoaded = exports.isArchiveLoaded = function isArchiveLoaded (key) { - key = fromURLToKey(key) - return key in archives -} - -exports.updateSizeTracking = function updateSizeTracking (archive) { - return daemon.updateSizeTracking(datEncoding.toStr(archive.key)) -} - -// archive fetch/query -// = - -exports.queryArchives = async function queryArchives (query) { - // run the query - var archiveInfos = await archivesDb.query(0, query) - if (!archiveInfos) return undefined - var isArray = Array.isArray(archiveInfos) - if (!isArray) archiveInfos = [archiveInfos] - - if (query && ('inMemory' in query)) { - archiveInfos = archiveInfos.filter(archiveInfo => isArchiveLoaded(archiveInfo.key) === query.inMemory) - } - - // attach some live data - await Promise.all(archiveInfos.map(async (archiveInfo) => { - var archive = getArchive(archiveInfo.key) - if (archive) { - var info = await daemon.getArchiveInfo(archiveInfo.key) - archiveInfo.isSwarmed = archiveInfo.userSettings.networked - archiveInfo.size = info.size - archiveInfo.peers = info.peers - archiveInfo.peerHistory = info.peerHistory - } else { - archiveInfo.isSwarmed = false - archiveInfo.peers = 0 - archiveInfo.peerHistory = [] - } - })) - return isArray ? 
archiveInfos : archiveInfos[0] -} - -exports.getArchiveInfo = async function getArchiveInfo (key) { - // get the archive - key = await fromURLToKey(key, true) - var archive = await getOrLoadArchive(key) - - // fetch archive data - var [meta, userSettings, manifest, archiveInfo] = await Promise.all([ - archivesDb.getMeta(key), - archivesDb.getUserSettings(0, key), - archive.pda.readManifest().catch(_ => {}), - daemon.getArchiveInfo(key) - ]) - manifest = manifest || {} - meta.key = key - meta.url = archive.url - meta.domain = archive.domain - meta.links = manifest.links || {} - meta.manifest = manifest - meta.version = archiveInfo.version - meta.size = archiveInfo.size - meta.userSettings = { - isSaved: userSettings.isSaved, - hidden: userSettings.hidden, - networked: userSettings.networked, - autoDownload: userSettings.autoDownload, - autoUpload: userSettings.autoUpload, - expiresAt: userSettings.expiresAt, - localSyncPath: userSettings.localSyncPath, - previewMode: userSettings.previewMode - } - meta.peers = archiveInfo.peers - meta.peerInfo = archiveInfo.peerInfo - meta.peerHistory = archiveInfo.peerHistory - meta.networkStats = archiveInfo.networkStats - - return meta -} - -exports.getArchiveNetworkStats = async function getArchiveNetworkStats (key) { - key = await fromURLToKey(key, true) - return daemon.getArchiveNetworkStats(key) -} - -exports.clearFileCache = async function clearFileCache (key) { - var userSettings = await archivesDb.getUserSettings(0, key) - return daemon.clearFileCache(key, userSettings) -} - -/** - * @desc - * Get the primary URL for a given dat URL - * - * @param {string} url - * @returns {Promise} - */ -const getPrimaryUrl = exports.getPrimaryUrl = async function (url) { - var key = await fromURLToKey(url, true) - var datDnsRecord = await datDnsDb.getCurrentByKey(key) - if (!datDnsRecord) return `dat://${key}` - return `dat://${datDnsRecord.name}` -} - -/** - * @desc - * Check that the archive's dat.json `domain` matches the current DNS - * If yes, write the confirmed entry to the dat_dns table - * - * @param {string} key - * @returns {Promise} - */ -const confirmDomain = exports.confirmDomain = async function (key) { - // fetch the current domain from the manifest - try { - var archive = await getOrLoadArchive(key) - var datJson = await archive.pda.readManifest() - } catch (e) { - return false - } - if (!datJson.domain) { - await datDnsDb.unset(key) - return false - } - - // confirm match with current DNS - var dnsKey = await require('./dns').resolveName(datJson.domain) - if (key !== dnsKey) { - await datDnsDb.unset(key) - return false - } - - // update mapping - await datDnsDb.update({name: datJson.domain, key}) - return true -} - -// helpers -// = - -const fromURLToKey = exports.fromURLToKey = function fromURLToKey (url, lookupDns = false) { - if (Buffer.isBuffer(url)) { - return url - } - if (DAT_HASH_REGEX.test(url)) { - // simple case: given the key - return url - } - - var urlp = parseDatURL(url) - - // validate - if (urlp.protocol !== 'dat:') { - throw new InvalidURLError('URL must be a dat: scheme') - } - if (!DAT_HASH_REGEX.test(urlp.host)) { - if (!lookupDns) { - throw new InvalidURLError('Hostname is not a valid hash') - } - return require('./dns').resolveName(urlp.host) - } - - return urlp.host -} - -const fromKeyToURL = exports.fromKeyToURL = function fromKeyToURL (key) { - if (typeof key !== 'string') { - key = datEncoding.toStr(key) - } - if (!key.startsWith('dat://')) { - return `dat://${key}/` - } - return key -} - -// archive proxy -// = - 
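// --- illustrative sketch (editorial addition, not part of the patch) ---
// Every proxy helper below follows the same pattern: close over (key, version,
// method) and forward each call to the daemon over RPC. A self-contained toy
// version of that factory, with a stubbed daemon so it actually runs, might look
// like this; `fakeDaemon` and `makeProxyMethod` are illustrative names only.

const {promisify} = require('util')

// stand-in for the RPC client (the real one is imported from the daemon process)
const fakeDaemon = {
  callArchiveAsyncMethod (key, version, method, ...args) {
    const cb = args.pop() // callback-style, mirroring the daemon's async methods
    cb(null, {key, version, method, args})
  }
}

// same shape as makeArchiveProxyCbFn below: one closure per proxied method
function makeProxyMethod (daemon, key, version, method) {
  return (...args) => daemon.callArchiveAsyncMethod(key, version, method, ...args)
}

// usage: build a callback-style method, then promisify it for async/await callers
const readdir = makeProxyMethod(fakeDaemon, 'abc123', undefined, 'readdir')
promisify(readdir)('/').then(res => console.log('proxied call:', res))
// --- end sketch ---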
-function makeArchiveProxyCbFn (key, version, method) { - return (...args) => daemon.callArchiveAsyncMethod(key, version, method, ...args) -} - -function makeArchiveProxyReadStreamFn (key, version, method) { - return (...args) => daemon.callArchiveReadStreamMethod(key, version, method, ...args) -} - -function makeArchiveProxyWriteStreamFn (key, version, method) { - return (...args) => daemon.callArchiveWriteStreamMethod(key, version, method, ...args) -} - -function makeArchiveProxyPDAPromiseFn (key, version, method) { - return (...args) => daemon.callArchivePDAPromiseMethod(key, version, method, ...args) -} - -function makeArchiveProxyPDAReadStreamFn (key, version, method) { - return (...args) => daemon.callArchivePDAReadStreamMethod(key, version, method, ...args) -} - -function fixStatObject (st) { - st.atime = (new Date(st.atime)).getTime() - st.mtime = (new Date(st.mtime)).getTime() - st.ctime = (new Date(st.ctime)).getTime() - st.isSocket = () => false - st.isSymbolicLink = () => false - st.isFile = () => (st.mode & 32768) === 32768 - st.isBlockDevice = () => false - st.isDirectory = () => (st.mode & 16384) === 16384 - st.isCharacterDevice = () => false - st.isFIFO = () => false -} - -/** - * - * @param {string|Buffer} key - * @param {number} version - * @param {Object} archiveInfo - * @returns {InternalDatArchive} - */ -function createArchiveProxy (key, version, archiveInfo) { - key = datEncoding.toStr(key) - const stat = makeArchiveProxyCbFn(key, version, 'stat') - const pdaStat = makeArchiveProxyPDAPromiseFn(key, version, 'stat') - return { - key: datEncoding.toBuf(key), - get url () { - return `dat://${this.domain || key}${version ? '+' + version : ''}` - }, - domain: undefined, - discoveryKey: datEncoding.toBuf(archiveInfo.discoveryKey), - writable: archiveInfo.writable, - - ready: makeArchiveProxyCbFn(key, version, 'ready'), - download: makeArchiveProxyCbFn(key, version, 'download'), - history: makeArchiveProxyReadStreamFn(key, version, 'history'), - createReadStream: makeArchiveProxyReadStreamFn(key, version, 'createReadStream'), - readFile: makeArchiveProxyCbFn(key, version, 'readFile'), - createDiffStream: makeArchiveProxyReadStreamFn(key, version, 'createDiffStream'), - createWriteStream: makeArchiveProxyWriteStreamFn(key, version, 'createWriteStream'), - writeFile: makeArchiveProxyCbFn(key, version, 'writeFile'), - unlink: makeArchiveProxyCbFn(key, version, 'unlink'), - mkdir: makeArchiveProxyCbFn(key, version, 'mkdir'), - rmdir: makeArchiveProxyCbFn(key, version, 'rmdir'), - readdir: makeArchiveProxyCbFn(key, version, 'readdir'), - stat: (...args) => { - var cb = args.pop() - args.push((err, st) => { - if (st) fixStatObject(st) - cb(err, st) - }) - stat(...args) - }, - lstat: makeArchiveProxyCbFn(key, version, 'lstat'), - access: makeArchiveProxyCbFn(key, version, 'access'), - - pda: { - stat: async (...args) => { - var st = await pdaStat(...args) - if (st) fixStatObject(st) - return st - }, - readFile: makeArchiveProxyPDAPromiseFn(key, version, 'readFile'), - readdir: makeArchiveProxyPDAPromiseFn(key, version, 'readdir'), - readSize: makeArchiveProxyPDAPromiseFn(key, version, 'readSize'), - writeFile: makeArchiveProxyPDAPromiseFn(key, version, 'writeFile'), - mkdir: makeArchiveProxyPDAPromiseFn(key, version, 'mkdir'), - copy: makeArchiveProxyPDAPromiseFn(key, version, 'copy'), - rename: makeArchiveProxyPDAPromiseFn(key, version, 'rename'), - unlink: makeArchiveProxyPDAPromiseFn(key, version, 'unlink'), - rmdir: makeArchiveProxyPDAPromiseFn(key, version, 'rmdir'), - 
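// editorial note: stat/lstat results come back from the daemon as plain objects,
// so fixStatObject() (defined above) normalizes the timestamps to epoch
// milliseconds and rebuilds the fs.Stats-style type checks from the raw `mode`
// field using the standard POSIX file-type bits: 32768 is 0o100000 (S_IFREG,
// regular file) and 16384 is 0o040000 (S_IFDIR, directory).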
download: makeArchiveProxyPDAPromiseFn(key, version, 'download'), - watch: makeArchiveProxyPDAReadStreamFn(key, version, 'watch'), - createNetworkActivityStream: makeArchiveProxyPDAReadStreamFn(key, version, 'createNetworkActivityStream'), - readManifest: makeArchiveProxyPDAPromiseFn(key, version, 'readManifest'), - writeManifest: makeArchiveProxyPDAPromiseFn(key, version, 'writeManifest'), - updateManifest: makeArchiveProxyPDAPromiseFn(key, version, 'updateManifest') - } - } -} diff --git a/dat/protocol.js b/dat/protocol.js index 6de06db0..f8878481 100644 --- a/dat/protocol.js +++ b/dat/protocol.js @@ -7,12 +7,12 @@ const intoStream = require('into-stream') const {toZipStream} = require('../lib/zip') const slugify = require('slugify') const markdown = require('../lib/markdown') +const libTools = require('@beaker/library-tools') const datDns = require('./dns') -const datLibrary = require('./library') +const datArchives = require('./archives') const datServeResolvePath = require('@beaker/dat-serve-resolve-path') -const directoryListingPage = require('./directory-listing-page') const errorPage = require('../lib/error-page') const mime = require('../lib/mime') const {makeSafe} = require('../lib/strings') @@ -119,7 +119,7 @@ exports.electronHandler = async function (request, respond) { try { // start searching the network - archive = await datLibrary.getOrLoadArchive(archiveKey) + archive = await datArchives.getOrLoadArchive(archiveKey) } catch (err) { logger.warn('Failed to open archive', {url: archiveKey, err}) cleanup() @@ -134,31 +134,37 @@ exports.electronHandler = async function (request, respond) { // checkout version if needed try { - var {checkoutFS} = datLibrary.getArchiveCheckout(archive, urlp.version) - if (urlp.version === 'preview') { - await checkoutFS.pda.stat('/') // run a stat to ensure preview mode exists - } + var {checkoutFS} = await datArchives.getArchiveCheckout(archive, urlp.version) } catch (err) { - if (err.noPreviewMode) { - // redirect to non-preview version - return respond({ - statusCode: 303, - headers: { - Location: `dat://${urlp.host}${urlp.pathname || '/'}${urlp.search || ''}` - }, - data: intoStream('') - }) - } else { - logger.warn('Failed to open archive checkout', {url: archiveKey, err}) - cleanup() - return respondError(500, 'Failed') - } + logger.warn('Failed to open archive checkout', {url: archiveKey, err}) + cleanup() + return respondError(500, 'Failed') } // read the manifest (it's needed in a couple places) var manifest try { manifest = await checkoutFS.pda.readManifest() } catch (e) { manifest = null } + // read type and configure + var category = libTools.typeToCategory(manifest ? 
manifest.type : '', false) || 'files' + const hasViewerApp = category !== 'website' + const canExecuteHTML = !hasViewerApp + + // render root-page applications by type + if (hasViewerApp && mime.acceptHeaderWantsHTML(request.headers.Accept)) { + return respond({ + statusCode: 200, + headers: { + // TODO CSP + 'Content-Type': 'text/html' + }, + data: intoStream(` + + +`) + }) + } + // read manifest CSP if (manifest && manifest.content_security_policy && typeof manifest.content_security_policy === 'string') { cspHeader = manifest.content_security_policy @@ -206,24 +212,6 @@ exports.electronHandler = async function (request, respond) { var headers = {} var entry = await datServeResolvePath(checkoutFS.pda, manifest, urlp, request.headers.Accept) - // use theme template if it exists - var themeSettings = { - active: false, - js: false, - css: false - } - if (!urlp.query.disable_theme) { - if (entry && mime.acceptHeaderWantsHTML(request.headers.Accept) && ['.html', '.htm', '.md'].includes(extname(entry.path))) { - let exists = async (path) => await checkoutFS.pda.stat(path).then(() => true, () => false) - let [js, css] = await Promise.all([exists('/theme/index.js'), exists('/theme/index.css')]) - if (js || css) { - themeSettings.active = true - themeSettings.css = css - themeSettings.js = js - } - } - } - // handle folder if (entry && entry.isDirectory()) { cleanup() @@ -258,7 +246,7 @@ exports.electronHandler = async function (request, respond) { // caching is disabled till we can figure out why // -prf // caching if-match - // const ETag = (checkoutFS.isLocalFS) ? false : 'block-' + entry.offset + // const ETag = 'block-' + entry.offset // if (request.headers['if-none-match'] === ETag) { // return respondError(304, 'Not Modified') // } @@ -296,38 +284,29 @@ exports.electronHandler = async function (request, respond) { // markdown rendering if (!range && entry.path.endsWith('.md') && mime.acceptHeaderWantsHTML(request.headers.Accept)) { let content = await checkoutFS.pda.readFile(entry.path, 'utf8') + let contentType = canExecuteHTML ? 'text/html' : 'text/plain' + content = canExecuteHTML ? markdown.render(content) : content return respond({ statusCode: 200, headers: Object.assign(headers, { - 'Content-Type': 'text/html' + 'Content-Type': contentType }), - data: intoStream(markdown.render(content, themeSettings)) - }) - } - - // theme wrapping - if (themeSettings.active) { - let html = await checkoutFS.pda.readFile(entry.path, 'utf8') - html = ` -${themeSettings.js ? `` : ''} -${themeSettings.css ? 
`` : ''} -${html}` - return respond({ - statusCode: 200, - headers: Object.assign(headers, { - 'Content-Type': 'text/html' - }), - data: intoStream(html) + data: intoStream(content) }) } // fetch the entry and stream the response - fileReadStream = checkoutFS.createReadStream(entry.path, range) + fileReadStream = await checkoutFS.pda.createReadStream(entry.path, range) var dataStream = fileReadStream .pipe(mime.identifyStream(entry.path, mimeType => { // cleanup the timeout now, as bytes have begun to stream cleanup() + // disable html as needed + if (!canExecuteHTML && mimeType.includes('html')) { + mimeType = 'text/plain' + } + // send headers, now that we can identify the data headersSent = true Object.assign(headers, { diff --git a/dat/watchlist.js b/dat/watchlist.js index 1be83b1d..d1dfbd74 100644 --- a/dat/watchlist.js +++ b/dat/watchlist.js @@ -3,8 +3,8 @@ const emitStream = require('emit-stream') const logger = require('../logger').child({category: 'dat', subcategory: 'watchlist'}) // dat modules -const datLibrary = require('../dat/library') -const datDns = require('../dat/dns') +const datArchives = require('./archives') +const datDns = require('./dns') const watchlistDb = require('../dbs/watchlist') // globals @@ -92,7 +92,7 @@ async function watch (site) { } // load archive - var archive = await datLibrary.loadArchive(key) + var archive = await datArchives.loadArchive(key) if (site.resolved === 0) { watchlistEvents.emit('resolved', site) } diff --git a/dbs/archive-drafts.js b/dbs/archive-drafts.js deleted file mode 100644 index f39974b8..00000000 --- a/dbs/archive-drafts.js +++ /dev/null @@ -1,30 +0,0 @@ -const db = require('./profile-data-db') -const archivesDb = require('./archives') - -// exported api -// = - -exports.list = async function (profileId, masterKey) { - // get draft list - var records = await db.all(`SELECT draftKey as key FROM archive_drafts WHERE profileId = ? AND masterKey = ? ORDER BY createdAt`, [profileId, masterKey]) - // fetch full info from archives db - return Promise.all(records.map(async ({key}) => archivesDb.query(profileId, {key, showHidden: true}))) -} - -exports.add = function (profileId, masterKey, draftKey) { - return db.run(` - INSERT OR REPLACE - INTO archive_drafts (profileId, masterKey, draftKey) - VALUES (?, ?, ?) - `, [profileId, masterKey, draftKey]) -} - -exports.remove = function (profileId, masterKey, draftKey) { - return db.run(`DELETE FROM archive_drafts WHERE profileId = ? AND masterKey = ? AND draftKey = ?`, [profileId, masterKey, draftKey]) -} - -exports.getMaster = async function (profileId, draftKey) { - var record = await db.get(`SELECT masterKey as key FROM archive_drafts WHERE profileId = ? 
AND draftKey = ?`, [profileId, draftKey]) - if (record) return record.key - return draftKey -} \ No newline at end of file diff --git a/dbs/archives.js b/dbs/archives.js index 0c320592..e3d9723e 100644 --- a/dbs/archives.js +++ b/dbs/archives.js @@ -7,64 +7,28 @@ const jetpack = require('fs-jetpack') const {InvalidArchiveKeyError} = require('beaker-error-constants') const db = require('./profile-data-db') const lock = require('../lib/lock') -const { - DAT_HASH_REGEX, - DAT_GC_EXPIRATION_AGE -} = require('../lib/const') +const {DAT_HASH_REGEX} = require('../lib/const') // typedefs // = /** - * @typedef {import('../dat/library').InternalDatArchive} InternalDatArchive - * - * @typedef {Object} LibraryArchiveRecord - * @prop {string} key - * @prop {string} url - * @prop {string?} domain - * @prop {string} title - * @prop {string} description - * @prop {Array} type - * @prop {number} mtime - * @prop {number} size - * @prop {boolean} isOwner - * @prop {number} lastAccessTime - * @prop {number} lastLibraryAccessTime - * @prop {Object} userSettings - * @prop {boolean} userSettings.isSaved - * @prop {boolean} userSettings.hidden - * @prop {boolean} userSettings.networked - * @prop {boolean} userSettings.autoDownload - * @prop {boolean} userSettings.autoUpload - * @prop {number} userSettings.expiresAt - * @prop {string} userSettings.localSyncPath - * @prop {boolean} userSettings.previewMode + * @typedef {import('../dat/daemon').DaemonDatArchive} DaemonDatArchive * * @typedef {Object} LibraryArchiveMeta * @prop {string} key + * @prop {string} url * @prop {string} title * @prop {string} description - * @prop {string | Array} type - * @prop {Array} installedNames + * @prop {string} type * @prop {number} mtime * @prop {number} size + * @prop {string} author + * @prop {string} forkOf * @prop {boolean} isOwner * @prop {number} lastAccessTime * @prop {number} lastLibraryAccessTime * - * @typedef {Object} LibraryArchiveUserSettings - * @prop {number} profileId - * @prop {string} key - * @prop {boolean} isSaved - * @prop {boolean} hidden - * @prop {boolean} networked - * @prop {boolean} autoDownload - * @prop {boolean} autoUpload - * @prop {number} expiresAt - * @prop {string} localSyncPath - * @prop {boolean} previewMode - * @prop {number} createdAt - * * @typedef {Object} MinimalLibraryArchiveRecord * @prop {string} key */ @@ -97,7 +61,7 @@ exports.getDatPath = function () { /** * @description Get the path to an archive's files. - * @param {string | Buffer | InternalDatArchive} archiveOrKey + * @param {string | Buffer | DaemonDatArchive} archiveOrKey * @returns {string} */ // @@ -113,23 +77,6 @@ const getArchiveMetaPath = exports.getArchiveMetaPath = function (archiveOrKey) return path.join(datPath, 'Archives', 'Meta', key.slice(0, 2), key.slice(2)) } -/** - * @description Get the path to an archive's temporary local sync path. - * @param {string | Buffer | InternalDatArchive} archiveOrKey - * @returns {string} - */ -const getInternalLocalSyncPath = exports.getInternalLocalSyncPath = function (archiveOrKey) { - var key /** @type string */ - if (typeof archiveOrKey === 'string') { - key = archiveOrKey - } else if (Buffer.isBuffer(archiveOrKey)) { - key = datEncoding.toStr(archiveOrKey) - } else { - key = datEncoding.toStr(archiveOrKey.key) - } - return path.join(datPath, 'Archives', 'LocalCopy', key.slice(0, 2), key.slice(2)) -} - /** * @description Delete all db entries and files for an archive. 
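 * (editorial note) The on-disk data removed here lives under the sharded layout
 * returned by getArchiveMetaPath() above: a key beginning "ab" maps to
 * <datPath>/Archives/Meta/ab/<rest of key>, so no single directory has to hold
 * every archive; the jetpack.removeAsync(path) call below appears to clear that
 * per-archive directory.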
* @param {string} key @@ -141,9 +88,7 @@ exports.deleteArchive = async function (key) { await Promise.all([ db.run(`DELETE FROM archives WHERE key=?`, key), db.run(`DELETE FROM archives_meta WHERE key=?`, key), - db.run(`DELETE FROM archives_meta_type WHERE key=?`, key), - jetpack.removeAsync(path), - jetpack.removeAsync(getInternalLocalSyncPath(key)) + jetpack.removeAsync(path) ]) return info ? info.size : 0 } @@ -152,166 +97,6 @@ exports.on = events.on.bind(events) exports.addListener = events.addListener.bind(events) exports.removeListener = events.removeListener.bind(events) -// exported methods: archive user settings -// = - -/** - * @description Get an array of saved archives. - * @param {number} profileId - * @param {Object} [query] - * @param {string} [query.key] - * @param {boolean} [query.isSaved] - * @param {boolean} [query.isNetworked] - * @param {boolean} [query.isOwner] - * @param {boolean} [query.showHidden] - * @param {string} [query.type] - * @param {string} [query.string] - * @returns {Promise>} - */ -exports.query = async function (profileId, query = {}) { - // fetch archive meta - var values = [] - var whereList = [] - if (query.isOwner === true) whereList.push('archives_meta.isOwner = 1') - if (query.isOwner === false) whereList.push('archives_meta.isOwner = 0') - if (query.isNetworked === true) whereList.push('archives.networked = 1') - if (query.isNetworked === false) whereList.push('archives.networked = 0') - if ('isSaved' in query) { - if (query.isSaved) { - whereList.push('archives.profileId = ?') - values.push(profileId) - whereList.push('archives.isSaved = 1') - } else { - whereList.push('(archives.isSaved = 0 OR archives.isSaved IS NULL)') - } - } - if (typeof query.key !== 'undefined') { - whereList.push('archives_meta.key = ?') - values.push(query.key) - } - if (!query.showHidden) whereList.push('(archives.hidden = 0 OR archives.hidden IS NULL)') - var WHERE = whereList.length ? `WHERE ${whereList.join(' AND ')}` : '' - - var archives = await db.all(` - SELECT - archives_meta.*, - GROUP_CONCAT(archives_meta_type.type) AS type, - archives.isSaved, - archives.hidden, - archives.networked, - archives.autoDownload, - archives.autoUpload, - archives.expiresAt, - archives.localSyncPath, - archives.previewMode, - dat_dns.name as domain - FROM archives_meta - LEFT JOIN archives ON archives.key = archives_meta.key - LEFT JOIN archives_meta_type ON archives_meta_type.key = archives_meta.key - LEFT JOIN dat_dns ON dat_dns.key = archives_meta.key AND dat_dns.isCurrent = 1 - ${WHERE} - GROUP BY archives_meta.key - `, values) - - // massage the output - archives.forEach(archive => { - archive.url = `dat://${archive.domain || archive.key}` - archive.isOwner = archive.isOwner != 0 - archive.type = archive.type ? 
archive.type.split(',') : [] - archive.userSettings = { - isSaved: archive.isSaved == 1, - hidden: archive.hidden == 0, - networked: archive.networked == 1, - autoDownload: archive.autoDownload == 1, - autoUpload: archive.autoUpload == 1, - expiresAt: archive.expiresAt, - localSyncPath: archive.localSyncPath, - previewMode: archive.previewMode == 1 - } - - // user settings - delete archive.isSaved - delete archive.hidden - delete archive.networked - delete archive.autoDownload - delete archive.autoUpload - delete archive.expiresAt - delete archive.localSyncPath - delete archive.previewMode - - // deprecated attrs - delete archive.createdByTitle - delete archive.createdByUrl - delete archive.forkOf - delete archive.metaSize - delete archive.stagingSize - delete archive.stagingSizeLessIgnored - }) - - // apply manual filters - if ('type' in query) { - let types = Array.isArray(query.type) ? query.type : [query.type] - archives = archives.filter((/** @type LibraryArchiveRecord */ a) => { - for (let type of types) { - if (a.type.indexOf(type) === -1) { - return false - } - } - return true - }) - } - - return ('key' in query) ? archives[0] : archives -} - -/** - * @description Get all archives that should be unsaved. - * @returns {Promise>} - */ -exports.listExpiredArchives = async function () { - return db.all(` - SELECT archives.key - FROM archives - WHERE - archives.isSaved = 1 - AND archives.expiresAt != 0 - AND archives.expiresAt IS NOT NULL - AND archives.expiresAt < ? - `, [Date.now()]) -} - -/** - * @description Get all archives that are ready for garbage collection. - * @param {Object} [opts] - * @param {number} [opts.olderThan] - * @param {boolean} [opts.isOwner] - * @returns {Promise>} - */ -exports.listGarbageCollectableArchives = async function ({olderThan, isOwner} = {}) { - olderThan = typeof olderThan === 'number' ? olderThan : DAT_GC_EXPIRATION_AGE - var isOwnerClause = typeof isOwner === 'boolean' ? `AND archives_meta.isOwner = ${isOwner ? '1' : '0'}` : '' - - // fetch archives - var records = await db.all(` - SELECT archives_meta.key - FROM archives_meta - LEFT JOIN archives ON archives_meta.key = archives.key - WHERE - (archives.isSaved != 1 OR archives.isSaved IS NULL) - AND archives_meta.lastAccessTime < ? - ${isOwnerClause} - `, [Date.now() - olderThan]) - var records2 = records.slice() - - // fetch any related drafts - for (let record of records2) { - let drafts = await db.all(`SELECT draftKey as key FROM archive_drafts WHERE masterKey = ? ORDER BY createdAt`, [record.key]) - records = records.concat(drafts) - } - - return records -} - /** * @description Upsert the last-access time. * @param {string | Buffer} key @@ -335,200 +120,75 @@ exports.touch = async function (key, timeVar = 'lastAccessTime', value = -1) { } /** - * @description - * Get a single archive's user settings. - * (Returns an empty object on not found.) - * @param {number} profileId - * @param {string | Buffer} key - * @returns {Promise} + * @param {string} key + * @returns {Promise} */ -const getUserSettings = exports.getUserSettings = async function (profileId, key) { +exports.hasMeta = async function (key) { // massage inputs var keyStr = typeof key !== 'string' ? datEncoding.toStr(key) : key - - // validate inputs if (!DAT_HASH_REGEX.test(keyStr)) { - throw new InvalidArchiveKeyError() + try { + keyStr = await require('../dat/dns').resolveName(keyStr) + } catch (e) { + return false + } } // fetch - try { - var settings = await db.get(` - SELECT * FROM archives WHERE profileId = ? AND key = ? 
- `, [profileId, keyStr]) - settings.isSaved = !!settings.isSaved - settings.hidden = !!settings.hidden - settings.networked = !!settings.networked - settings.autoDownload = !!settings.autoDownload - settings.autoUpload = !!settings.autoUpload - settings.previewMode = Number(settings.previewMode) === 1 - return /** @type LibraryArchiveUserSettings */(settings) - } catch (e) { - return /** @type LibraryArchiveUserSettings */({}) - } -} - -/** - * @description Write an archive's user setting. - * @param {number} profileId - * @param {string | Buffer} key - * @param {Object} [newValues] - * @param {boolean} [newValues.isSaved] - * @param {boolean} [newValues.hidden] - * @param {boolean} [newValues.networked] - * @param {boolean} [newValues.autoDownload] - * @param {boolean} [newValues.autoUpload] - * @param {number} [newValues.expiresAt] - * @param {string} [newValues.localSyncPath] - * @param {boolean} [newValues.previewMode] - * @returns {Promise} - */ -exports.setUserSettings = async function (profileId, key, newValues = {}) { - // massage inputs - var keyStr = datEncoding.toStr(key) - - // validate inputs - if (!DAT_HASH_REGEX.test(keyStr)) { - throw new InvalidArchiveKeyError() - } - - var release = await lock('archives-db') - try { - // fetch current - var value = await getUserSettings(profileId, keyStr) - - if (!value || typeof value.key === 'undefined') { - // create - value = /** @type LibraryArchiveUserSettings */ ({ - profileId, - key: keyStr, - isSaved: newValues.isSaved, - hidden: newValues.hidden, - networked: ('networked' in newValues) ? newValues.networked : true, - autoDownload: ('autoDownload' in newValues) ? newValues.autoDownload : newValues.isSaved, - autoUpload: ('autoUpload' in newValues) ? newValues.autoUpload : newValues.isSaved, - expiresAt: newValues.expiresAt, - localSyncPath: (newValues.localSyncPath) ? newValues.localSyncPath : '', - previewMode: ('previewMode' in newValues) ? newValues.previewMode : '' - }) - let valueArray = [ - profileId, - keyStr, - flag(value.isSaved), - flag(value.hidden), - flag(value.networked), - flag(value.autoDownload), - flag(value.autoUpload), - value.expiresAt, - value.localSyncPath, - flag(value.previewMode) - ] - await db.run(` - INSERT INTO archives - ( - profileId, - key, - isSaved, - hidden, - networked, - autoDownload, - autoUpload, - expiresAt, - localSyncPath, - previewMode - ) - VALUES (${valueArray.map(_ => '?').join(', ')}) - `, valueArray) - } else { - // update - let { isSaved, hidden, networked, autoDownload, autoUpload, expiresAt, localSyncPath, previewMode } = newValues - if (typeof isSaved === 'boolean') value.isSaved = isSaved - if (typeof hidden === 'boolean') value.hidden = hidden - if (typeof networked === 'boolean') value.networked = networked - if (typeof autoDownload === 'boolean') value.autoDownload = autoDownload - if (typeof autoUpload === 'boolean') value.autoUpload = autoUpload - if (typeof expiresAt === 'number') value.expiresAt = expiresAt - if (typeof localSyncPath === 'string') value.localSyncPath = localSyncPath - if (typeof previewMode === 'boolean') value.previewMode = previewMode - let valueArray = [ - flag(value.isSaved), - flag(value.hidden), - flag(value.networked), - flag(value.autoDownload), - flag(value.autoUpload), - value.expiresAt, - value.localSyncPath, - flag(value.previewMode), - profileId, - keyStr - ] - await db.run(` - UPDATE archives - SET - isSaved = ?, - hidden = ?, - networked = ?, - autoDownload = ?, - autoUpload = ?, - expiresAt = ?, - localSyncPath = ?, - previewMode = ? 
- WHERE - profileId = ? AND key = ? - `, valueArray) - } - - events.emit('update:archive-user-settings', keyStr, value, newValues) - return value - } finally { - release() - } + var meta = await db.get(` + SELECT + archives_meta.key + FROM archives_meta + WHERE archives_meta.key = ? + `, [keyStr]) + return !!meta } -// exported methods: archive meta -// = - /** * @description * Get a single archive's metadata. * Returns an empty object on not-found. * @param {string | Buffer} key + * @param {Object} [opts] + * @param {boolean} [opts.noDefault] * @returns {Promise} */ -const getMeta = exports.getMeta = async function (key) { +const getMeta = exports.getMeta = async function (key, {noDefault} = {noDefault: false}) { // massage inputs var keyStr = typeof key !== 'string' ? datEncoding.toStr(key) : key + var origKeyStr = keyStr // validate inputs if (!DAT_HASH_REGEX.test(keyStr)) { - keyStr = await require('../dat/dns').resolveName(keyStr) + try { + keyStr = await require('../dat/dns').resolveName(keyStr) + } catch (e) { + return noDefault ? undefined : defaultMeta(keyStr, origKeyStr) + } } // fetch var meta = await db.get(` SELECT archives_meta.*, - GROUP_CONCAT(archives_meta_type.type) AS type, - GROUP_CONCAT(apps.name) as installedNames + dat_dns.name as dnsName FROM archives_meta - LEFT JOIN archives_meta_type ON archives_meta_type.key = archives_meta.key - LEFT JOIN apps ON apps.url = ('dat://' || archives_meta.key) + LEFT JOIN dat_dns ON dat_dns.key = archives_meta.key AND dat_dns.isCurrent = 1 WHERE archives_meta.key = ? GROUP BY archives_meta.key `, [keyStr]) if (!meta) { - return defaultMeta(keyStr) + return noDefault ? undefined : defaultMeta(keyStr, origKeyStr) } // massage some values + meta.url = `dat://${meta.dnsName || meta.key}` meta.isOwner = !!meta.isOwner - meta.type = meta.type ? meta.type.split(',') : [] - meta.installedNames = meta.installedNames ? meta.installedNames.split(',') : [] + delete meta.dnsName // remove old attrs delete meta.createdByTitle delete meta.createdByUrl - delete meta.forkOf delete meta.metaSize delete meta.stagingSize delete meta.stagingSizeLessIgnored @@ -555,12 +215,13 @@ exports.setMeta = async function (key, value) { } // extract the desired values - var {title, description, type, size, mtime, isOwner} = value + var {title, description, type, size, author, forkOf, mtime, isOwner} = value title = typeof title === 'string' ? title : '' description = typeof description === 'string' ? description : '' - if (typeof type === 'string') type = type.split(' ') - else if (Array.isArray(type)) type = type.filter(v => v && typeof v === 'string') + type = typeof type === 'string' ? type : '' var isOwnerFlag = flag(isOwner) + if (typeof author === 'string') author = normalizeDatUrl(author) + if (typeof forkOf === 'string') forkOf = normalizeDatUrl(forkOf) // write var release = await lock('archives-db:meta') @@ -568,55 +229,36 @@ exports.setMeta = async function (key, value) { try { await db.run(` INSERT OR REPLACE INTO - archives_meta (key, title, description, mtime, size, isOwner, lastAccessTime, lastLibraryAccessTime) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
- `, [keyStr, title, description, mtime, size, isOwnerFlag, lastAccessTime, lastLibraryAccessTime]) - await db.run(`DELETE FROM archives_meta_type WHERE key=?`, keyStr) - if (type) { - await Promise.all(type.map(t => ( - db.run(`INSERT INTO archives_meta_type (key, type) VALUES (?, ?)`, [keyStr, t]) - ))) - } + archives_meta (key, title, description, type, mtime, size, author, forkOf, isOwner, lastAccessTime, lastLibraryAccessTime) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, [keyStr, title, description, type, mtime, size, author, forkOf, isOwnerFlag, lastAccessTime, lastLibraryAccessTime]) } finally { release() } events.emit('update:archive-meta', keyStr, value) } -/** - * @description Find the archive currently using a given localSyncPath. - * @param {number} profileId - * @param {string} localSyncPath - * @returns {Promise} - */ -exports.getByLocalSyncPath = async function (profileId, localSyncPath) { - try { - return await db.get(` - SELECT key FROM archives WHERE profileId = ? AND localSyncPath = ? - `, [profileId, localSyncPath]) - } catch (e) { - return null - } -} - // internal methods // = /** * @param {string} key + * @param {string} name * @returns {LibraryArchiveMeta} */ -function defaultMeta (key) { +function defaultMeta (key, name) { return { key, - title: null, - description: null, - type: [], + url: `dat://${name}`, + title: undefined, + description: undefined, + type: undefined, + author: undefined, + forkOf: undefined, mtime: 0, isOwner: false, lastAccessTime: 0, lastLibraryAccessTime: 0, - installedNames: [], size: 0 } } @@ -638,3 +280,11 @@ exports.extractOrigin = function (originURL) { if (!urlp || !urlp.host || !urlp.protocol) return return (urlp.protocol + (urlp.slashes ? '//' : '') + urlp.host) } + +function normalizeDatUrl (url) { + var match = url.match(DAT_HASH_REGEX) + if (match) { + return `dat://${match[0]}` + } + return exports.extractOrigin(url) +} \ No newline at end of file diff --git a/dbs/bookmarks.js b/dbs/bookmarks.js deleted file mode 100644 index 3ef10f4b..00000000 --- a/dbs/bookmarks.js +++ /dev/null @@ -1,292 +0,0 @@ -const assert = require('assert') -const EventEmitter = require('events') -const db = require('./profile-data-db') -const normalizeUrl = require('normalize-url') -const lock = require('../lib/lock') -const knex = require('../lib/knex') - -const NORMALIZE_OPTS = { - stripFragment: false, - stripWWW: false, - removeQueryParameters: false, - removeTrailingSlash: false -} - -// typedefs -// = - -/** - * @typedef {Object} Bookmark - * @prop {number} createdAt - * @prop {string} href - * @prop {string} title - * @prop {string} description - * @prop {string[]} tags - * @prop {boolean} pinned - * @prop {boolean} isPublic - * @prop {number} pinOrder - */ - -// globals -// = - -const events = new EventEmitter() - -// exported methods -// = - -exports.on = events.on.bind(events) -exports.once = events.once.bind(events) -exports.removeListener = events.removeListener.bind(events) - -/** - * @param {number} profileId - * @param {Object} values - * @param {string} [values.href] - * @param {string} [values.title] - * @param {string} [values.description] - * @param {string | string[]} [values.tags] - * @param {boolean} [values.pinned] - * @param {boolean} [values.isPublic] - * @returns {Promise} - */ -exports.addBookmark = async function (profileId, {href, title, description, tags, pinned, isPublic} = {}) { - // validate - assertValidHref(href) - assertValidTitle(title) - assertValidDescription(description) - assertValidTags(tags) - - // massage 
values - href = normalizeUrl(href, NORMALIZE_OPTS) - var tagsStr = tagsToString(tags) - description = description || '' - isPublic = isPublic || false - - // update record - var release = await lock(`bookmarksdb`) - try { - await db.run(` - INSERT OR REPLACE - INTO bookmarks (profileId, url, title, description, tags, pinned, isPublic) - VALUES (?, ?, ?, ?, ?, ?, ?) - `, [profileId, href, title, description, tagsStr, Number(pinned), Number(isPublic)]) - events.emit('changed') - } finally { - release() - } -} - -/** - * @param {number} profileId - * @param {string} bookmarkHref - * @param {Object} values - * @param {string} [values.href] - * @param {string} [values.title] - * @param {string} [values.description] - * @param {string | string[]} [values.tags] - * @param {boolean} [values.pinned] - * @param {boolean} [values.isPublic] - * @returns {Promise} - */ -exports.editBookmark = async function (profileId, bookmarkHref, {href, title, description, tags, pinned, isPublic} = {}) { - // validate - assertValidHref(bookmarkHref) - if (href) assertValidHref(href) - if (title) assertValidTitle(title) - if (description) assertValidDescription(description) - if (tags) assertValidTags(tags) - - // massage values - bookmarkHref = normalizeUrl(bookmarkHref, NORMALIZE_OPTS) - href = href ? normalizeUrl(href, NORMALIZE_OPTS) : undefined - var tagsStr = tags ? tagsToString(tags) : undefined - - // read, update, store - var release = await lock(`bookmarksdb`) - try { - var oldBookmark = await db.get(`SELECT url, title, pinned, pinOrder FROM bookmarks WHERE profileId = ? AND url = ?`, [profileId, bookmarkHref]) - - if (oldBookmark) { - // update record - let sql = knex('bookmarks') - .where({profileId, url: bookmarkHref}) - if (typeof href !== 'undefined') sql = sql.update('url', href) - if (typeof title !== 'undefined') sql = sql.update('title', title) - if (typeof description !== 'undefined') sql = sql.update('description', description) - if (typeof tagsStr !== 'undefined') sql = sql.update('tags', tagsStr) - if (typeof pinned !== 'undefined') sql = sql.update('pinned', Number(pinned)) - if (typeof isPublic !== 'undefined') sql = sql.update('isPublic', Number(isPublic)) - await db.run(sql) - } else { - // insert record - await db.run(` - INSERT OR REPLACE - INTO bookmarks (profileId, url, title, description, tags, pinned, isPublic) - VALUES (?, ?, ?, ?, ?, ?, ?) - `, [profileId, href, title, description || '', tagsStr, Number(pinned), Number(isPublic)]) - } - events.emit('changed') - } finally { - release() - } -} - -/** - * @param {number} profileId - * @param {string} href - * @returns {Promise} - */ -exports.removeBookmark = async function (profileId, href) { - href = normalizeUrl(href, NORMALIZE_OPTS) - var release = await lock(`bookmarksdb`) - try { - await db.run(`DELETE FROM bookmarks WHERE profileId = ? AND url = ?`, [profileId, href]) - events.emit('changed') - } finally { - release() - } -} - -/** - * @param {number} profileId - * @param {string[]} urls - * @returns {Promise} - */ -exports.setBookmarkPinOrder = async function (profileId, urls) { - var len = urls.length - var release = await lock(`bookmarksdb`) - try { - await Promise.all(urls.map((url, i) => ( - db.run(`UPDATE bookmarks SET pinOrder = ? WHERE profileId = ? 
AND url = ?`, [len - i, profileId, url]) - ))) - } finally { - release() - } -} - -/** - * @param {number} profileId - * @param {string} url - * @returns {Promise} - */ -exports.getBookmark = async function (profileId, href) { - href = normalizeUrl(href, NORMALIZE_OPTS) - return toNewFormat(await db.get(`SELECT * FROM bookmarks WHERE profileId = ? AND url = ?`, [profileId, href])) -} - -/** - * @param {number} profileId - * @param {Object} [opts] - * @param {Object} [opts.filters] - * @param {boolean} [opts.filters.pinned] - * @param {boolean} [opts.filters.isPublic] - * @returns {Promise>} - */ -exports.listBookmarks = async function (profileId, {filters} = {}) { - let sql = knex('bookmarks') - .select('url') - .select('title') - .select('description') - .select('tags') - .select('pinned') - .select('isPublic') - .select('pinOrder') - .select('createdAt') - .where('profileId', '=', profileId) - .orderBy('createdAt', 'DESC') - if (filters && filters.pinned) { - sql = sql.where('pinned', '=', '1') - } - if (filters && 'isPublic' in filters) { - sql = sql.where('isPublic', '=', filters.isPublic ? '1' : '0') - } - - var bookmarks = await db.all(sql) - return bookmarks.map(toNewFormat) -} - -/** - * @param {number} profileId - * @returns {Promise>} - */ -exports.listBookmarkTags = async function (profileId) { - var tagSet = new Set() - var bookmarks = await db.all(`SELECT tags FROM bookmarks WHERE profileId = ?`, [profileId]) - bookmarks.forEach(b => { - if (b.tags) { - b.tags.split(' ').forEach(t => tagSet.add(t)) - } - }) - return Array.from(tagSet) -} - -/** - * @param {string | string[]} v - * @returns {string} - */ -function tagsToString (v) { - if (Array.isArray(v)) { - v = v.join(' ') - } - if (typeof v === 'string') { - v = v.replace(/,/g, ' ') // convert any commas to spaces - } - return v -} - -/** - * @param {Object} b - * @returns {Bookmark | null} - */ -function toNewFormat (b) { - if (!b) return null - return { - createdAt: b.createdAt * 1e3, // convert to ms - href: b.url, - title: b.title, - description: b.description, - tags: b.tags ? 
b.tags.split(' ').filter(Boolean) : [], - pinned: !!b.pinned, - isPublic: !!b.isPublic, - pinOrder: b.pinOrder - } -} - -/** - * @param {string} v - * @returns {void} - */ -function assertValidHref (v) { - assert(v && typeof v === 'string', 'href must be a valid URL') -} - -/** - * @param {string} v - * @returns {void} - */ -function assertValidTitle (v) { - assert(v && typeof v === 'string', 'title must be a non-empty string') -} - -/** - * @param {string} v - * @returns {void} - */ -function assertValidDescription (v) { - if (!v) return // optional - assert(typeof v === 'string', 'title must be a non-empty string') -} - -/** - * @param {string|string[]} v - * @returns {void} - */ -function assertValidTags (v) { - if (!v) return // optional - if (Array.isArray(v)) { - assert(v.every(item => typeof item === 'string'), 'tags must be a string or array or strings') - } else { - assert(typeof v === 'string', 'tags must be a string or array or strings') - } -} diff --git a/dbs/dat-dns.js b/dbs/dat-dns.js index 19a303e3..93899148 100644 --- a/dbs/dat-dns.js +++ b/dbs/dat-dns.js @@ -56,7 +56,7 @@ exports.update = async function ({key, name}) { if (old && old.key !== key) { // unset old await db.run(knex('dat_dns').update({isCurrent: 0}).where({name})) - events.emit('update', {key: old.key, name: undefined}) + events.emit('updated', {key: old.key, name: undefined}) } let curr = await db.get(knex('dat_dns').where({name, key})) @@ -73,7 +73,7 @@ exports.update = async function ({key, name}) { // update current await db.run(knex('dat_dns').update({lastConfirmedAt: Date.now(), isCurrent: 1}).where({name, key})) } - events.emit('update', {key, name}) + events.emit('updated', {key, name}) } finally { release() } @@ -87,7 +87,7 @@ exports.unset = async function (key) { var curr = await db.get(knex('dat_dns').where({key, isCurrent: 1})) if (curr) { await db.run(knex('dat_dns').update({isCurrent: 0}).where({key})) - events.emit('update', {key, name: undefined}) + events.emit('updated', {key, name: undefined}) } } diff --git a/dbs/index.js b/dbs/index.js index a1e5fed6..884bf3e5 100644 --- a/dbs/index.js +++ b/dbs/index.js @@ -1,11 +1,8 @@ module.exports = { archives: require('./archives'), - archiveDrafts: require('./archive-drafts'), - bookmarks: require('./bookmarks'), history: require('./history'), profileData: require('./profile-data-db'), settings: require('./settings'), sitedata: require('./sitedata'), - templates: require('./templates'), watchlist: require('./watchlist') } diff --git a/dbs/profile-data-db.js b/dbs/profile-data-db.js index 9e3b6641..7dc7a744 100644 --- a/dbs/profile-data-db.js +++ b/dbs/profile-data-db.js @@ -123,7 +123,13 @@ migrations = [ migration('profile-data.v33.sql'), migration('profile-data.v34.sql'), migration('profile-data.v35.sql'), - migration('profile-data.v36.sql') + migration('profile-data.v36.sql'), + migration('profile-data.v37.sql'), + migration('profile-data.v38.sql'), + migration('profile-data.v39.sql'), + migration('profile-data.v40.sql'), + migration('profile-data.v41.sql'), + migration('profile-data.v42.sql') ] function migration (file, opts = {}) { return cb => { diff --git a/dbs/schemas/profile-data.sql.js b/dbs/schemas/profile-data.sql.js index 9adff13b..706b2f04 100644 --- a/dbs/schemas/profile-data.sql.js +++ b/dbs/schemas/profile-data.sql.js @@ -24,46 +24,37 @@ CREATE TABLE user_site_sessions ( FOREIGN KEY (userId) REFERENCES users (id) ON DELETE CASCADE ); -CREATE TABLE archives ( - profileId INTEGER NOT NULL, - key TEXT NOT NULL, -- dat key - - 
previewMode INTEGER, -- automatically publish changes (0) or write to local folder (1) - localSyncPath TEXT, -- custom local folder that the data is synced to - - isSaved INTEGER, -- is this archive saved to our library? - hidden INTEGER DEFAULT 0, -- should this archive be hidden in the library or select-archive modals? (this is useful for internal dats, such as drafts) - networked INTEGER DEFAULT 1, -- join the swarm (1) or do not swarm (0) - autoDownload INTEGER DEFAULT 1, -- watch and download all available data (1) or sparsely download on demand (0) - autoUpload INTEGER DEFAULT 1, -- join the swarm at startup (1) or only swarm when visiting (0) - expiresAt INTEGER, -- change autoUpload to 0 at this time (used for temporary seeding) - createdAt INTEGER DEFAULT (strftime('%s', 'now')), - - localPath TEXT, -- deprecated - autoPublishLocal INTEGER DEFAULT 0 -- deprecated -- watch localSyncPath and automatically publish changes (1) or not (0) -); - CREATE TABLE archives_meta ( key TEXT PRIMARY KEY, title TEXT, description TEXT, + type TEXT, mtime INTEGER, size INTEGER, + author TEXT, + forkOf TEXT, isOwner INTEGER, lastAccessTime INTEGER DEFAULT 0, lastLibraryAccessTime INTEGER DEFAULT 0, - forkOf TEXT, -- deprecated createdByUrl TEXT, -- deprecated createdByTitle TEXT, -- deprecated metaSize INTEGER, -- deprecated stagingSize INTEGER -- deprecated ); +CREATE VIRTUAL TABLE archives_meta_fts_index USING fts5(title, description, content='archives_meta'); -CREATE TABLE archives_meta_type ( - key TEXT, - type TEXT -); +-- triggers to keep archives_meta_fts_index updated +CREATE TRIGGER archives_meta_ai AFTER INSERT ON archives_meta BEGIN + INSERT INTO archives_meta_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); +END; +CREATE TRIGGER archives_meta_ad AFTER DELETE ON archives_meta BEGIN + INSERT INTO archives_meta_fts_index(archives_meta_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); +END; +CREATE TRIGGER archives_meta_au AFTER UPDATE ON archives_meta BEGIN + INSERT INTO archives_meta_fts_index(archives_meta_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); + INSERT INTO archives_meta_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); +END; CREATE TABLE dat_dns ( id INTEGER PRIMARY KEY, @@ -76,22 +67,6 @@ CREATE TABLE dat_dns ( CREATE INDEX dat_dns_name ON dat_dns (name); CREATE INDEX dat_dns_key ON dat_dns (key); -CREATE TABLE bookmarks ( - profileId INTEGER, - url TEXT NOT NULL, - title TEXT, - description TEXT, - isPublic INTEGER, - pinned INTEGER, - pinOrder INTEGER DEFAULT 0, - createdAt INTEGER DEFAULT (strftime('%s', 'now')), - tags TEXT, - notes TEXT, - - PRIMARY KEY (profileId, url), - FOREIGN KEY (profileId) REFERENCES profiles (id) ON DELETE CASCADE -); - CREATE TABLE visits ( profileId INTEGER, url TEXT NOT NULL, @@ -136,23 +111,12 @@ CREATE TABLE watchlist ( FOREIGN KEY (profileId) REFERENCES profiles (id) ON DELETE CASCADE ); --- list of the users current templates -CREATE TABLE templates ( - profileId INTEGER, - url TEXT NOT NULL, - title TEXT, - screenshot, - createdAt INTEGER DEFAULT (strftime('%s', 'now')), - - PRIMARY KEY (profileId, url), - FOREIGN KEY (profileId) REFERENCES profiles (id) ON DELETE CASCADE -); - -- list of sites being crawled CREATE TABLE crawl_sources ( id INTEGER PRIMARY KEY NOT NULL, url TEXT NOT NULL, - datDnsId INTEGER + datDnsId INTEGER, + isPrivate INTEGER ); -- tracking information on the 
crawl-state of the sources @@ -166,40 +130,14 @@ CREATE TABLE crawl_sources_meta ( FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE ); --- crawled descriptions of other sites -CREATE TABLE crawl_site_descriptions ( - crawlSourceId INTEGER NOT NULL, - crawledAt INTEGER, - - url TEXT, - title TEXT, - description TEXT, - type TEXT, -- comma separated strings - - FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE -); -CREATE VIRTUAL TABLE crawl_site_descriptions_fts_index USING fts5(title, description, content='crawl_site_descriptions'); - --- triggers to keep crawl_site_descriptions_fts_index updated -CREATE TRIGGER crawl_site_descriptions_ai AFTER INSERT ON crawl_site_descriptions BEGIN - INSERT INTO crawl_site_descriptions_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); -END; -CREATE TRIGGER crawl_site_descriptions_ad AFTER DELETE ON crawl_site_descriptions BEGIN - INSERT INTO crawl_site_descriptions_fts_index(crawl_site_descriptions_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); -END; -CREATE TRIGGER crawl_site_descriptions_au AFTER UPDATE ON crawl_site_descriptions BEGIN - INSERT INTO crawl_site_descriptions_fts_index(crawl_site_descriptions_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); - INSERT INTO crawl_site_descriptions_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); -END; - -- crawled tags CREATE TABLE crawl_tags ( id INTEGER PRIMARY KEY, tag TEXT UNIQUE ); --- crawled posts -CREATE TABLE crawl_posts ( +-- crawled statuses +CREATE TABLE crawl_statuses ( crawlSourceId INTEGER NOT NULL, pathname TEXT NOT NULL, crawledAt INTEGER, @@ -210,18 +148,18 @@ CREATE TABLE crawl_posts ( FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE ); -CREATE VIRTUAL TABLE crawl_posts_fts_index USING fts5(body, content='crawl_posts'); +CREATE VIRTUAL TABLE crawl_statuses_fts_index USING fts5(body, content='crawl_statuses'); --- triggers to keep crawl_posts_fts_index updated -CREATE TRIGGER crawl_posts_ai AFTER INSERT ON crawl_posts BEGIN - INSERT INTO crawl_posts_fts_index(rowid, body) VALUES (new.rowid, new.body); +-- triggers to keep crawl_statuses_fts_index updated +CREATE TRIGGER crawl_statuses_ai AFTER INSERT ON crawl_statuses BEGIN + INSERT INTO crawl_statuses_fts_index(rowid, body) VALUES (new.rowid, new.body); END; -CREATE TRIGGER crawl_posts_ad AFTER DELETE ON crawl_posts BEGIN - INSERT INTO crawl_posts_fts_index(crawl_posts_fts_index, rowid, body) VALUES('delete', old.rowid, old.body); +CREATE TRIGGER crawl_statuses_ad AFTER DELETE ON crawl_statuses BEGIN + INSERT INTO crawl_statuses_fts_index(crawl_statuses_fts_index, rowid, body) VALUES('delete', old.rowid, old.body); END; -CREATE TRIGGER crawl_posts_au AFTER UPDATE ON crawl_posts BEGIN - INSERT INTO crawl_posts_fts_index(crawl_posts_fts_index, rowid, body) VALUES('delete', old.rowid, old.body); - INSERT INTO crawl_posts_fts_index(rowid, body) VALUES (new.rowid, new.body); +CREATE TRIGGER crawl_statuses_au AFTER UPDATE ON crawl_statuses BEGIN + INSERT INTO crawl_statuses_fts_index(crawl_statuses_fts_index, rowid, body) VALUES('delete', old.rowid, old.body); + INSERT INTO crawl_statuses_fts_index(rowid, body) VALUES (new.rowid, new.body); END; -- crawled comments @@ -260,7 +198,7 @@ CREATE TABLE crawl_reactions ( crawledAt INTEGER, topic TEXT NOT NULL, - emojis TEXT NOT NULL, + phrases TEXT NOT NULL, 
PRIMARY KEY (crawlSourceId, pathname), FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE @@ -332,85 +270,69 @@ CREATE TABLE crawl_follows ( FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE ); --- crawled discussions -CREATE TABLE crawl_discussions ( - id INTEGER PRIMARY KEY, +-- crawled dats +CREATE TABLE crawl_dats ( crawlSourceId INTEGER NOT NULL, - pathname TEXT NOT NULL, crawledAt INTEGER, - title TEXT NOT NULL, - body TEXT, - href TEXT, - createdAt INTEGER, - updatedAt INTEGER, + key TEXT NOT NULL, + title TEXT, + description TEXT, + type TEXT, + PRIMARY KEY (crawlSourceId, key), FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE ); -CREATE INDEX crawl_discussions_url ON crawl_discussions (crawlSourceId, pathname); -CREATE VIRTUAL TABLE crawl_discussions_fts_index USING fts5(title, body, content='crawl_discussions'); +CREATE VIRTUAL TABLE crawl_dats_fts_index USING fts5(title, description, content='crawl_dats'); --- triggers to keep crawl_discussions_fts_index updated -CREATE TRIGGER crawl_discussions_ai AFTER INSERT ON crawl_discussions BEGIN - INSERT INTO crawl_discussions_fts_index(rowid, title, body) VALUES (new.rowid, new.title, new.body); +-- triggers to keep crawl_dats_fts_index updated +CREATE TRIGGER crawl_dats_ai AFTER INSERT ON crawl_dats BEGIN + INSERT INTO crawl_dats_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); END; -CREATE TRIGGER crawl_discussions_ad AFTER DELETE ON crawl_discussions BEGIN - INSERT INTO crawl_discussions_fts_index(crawl_discussions_fts_index, rowid, title, body) VALUES('delete', old.rowid, old.title, old.body); +CREATE TRIGGER crawl_dats_ad AFTER DELETE ON crawl_dats BEGIN + INSERT INTO crawl_dats_fts_index(crawl_dats_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); END; -CREATE TRIGGER crawl_discussions_au AFTER UPDATE ON crawl_discussions BEGIN - INSERT INTO crawl_discussions_fts_index(crawl_discussions_fts_index, rowid, title, body) VALUES('delete', old.rowid, old.title, old.body); - INSERT INTO crawl_discussions_fts_index(rowid, title, body) VALUES (new.rowid, new.title, new.body); +CREATE TRIGGER crawl_dats_au AFTER UPDATE ON crawl_dats BEGIN + INSERT INTO crawl_dats_fts_index(crawl_dats_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); + INSERT INTO crawl_dats_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); END; --- crawled discussion tags -CREATE TABLE crawl_discussions_tags ( - crawlDiscussionId INTEGER, - crawlTagId INTEGER, - - FOREIGN KEY (crawlDiscussionId) REFERENCES crawl_discussions (id) ON DELETE CASCADE, - FOREIGN KEY (crawlTagId) REFERENCES crawl_tags (id) ON DELETE CASCADE -); - --- crawled media -CREATE TABLE crawl_media ( - id INTEGER PRIMARY KEY, - crawlSourceId INTEGER NOT NULL, - pathname TEXT NOT NULL, - crawledAt INTEGER, - - subtype TEXT NOT NULL, - href TEXT NOT NULL, - title TEXT NOT NULL, +-- deprecated +CREATE TABLE bookmarks ( + profileId INTEGER, + url TEXT NOT NULL, + title TEXT, description TEXT, - createdAt INTEGER, - updatedAt INTEGER, + isPublic INTEGER, + pinned INTEGER, + pinOrder INTEGER DEFAULT 0, + createdAt INTEGER DEFAULT (strftime('%s', 'now')), + tags TEXT, + notes TEXT, - FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE + PRIMARY KEY (profileId, url), + FOREIGN KEY (profileId) REFERENCES profiles (id) ON DELETE CASCADE ); -CREATE INDEX 
crawl_media_url ON crawl_media (crawlSourceId, pathname); -CREATE INDEX crawl_media_subtype ON crawl_media (subtype); -CREATE INDEX crawl_media_href ON crawl_media (href); -CREATE VIRTUAL TABLE crawl_media_fts_index USING fts5(title, description, content='crawl_media'); --- triggers to keep crawl_media_fts_index updated -CREATE TRIGGER crawl_media_ai AFTER INSERT ON crawl_media BEGIN - INSERT INTO crawl_media_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); -END; -CREATE TRIGGER crawl_media_ad AFTER DELETE ON crawl_media BEGIN - INSERT INTO crawl_media_fts_index(crawl_media_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); -END; -CREATE TRIGGER crawl_media_au AFTER UPDATE ON crawl_media BEGIN - INSERT INTO crawl_media_fts_index(crawl_media_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); - INSERT INTO crawl_media_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); -END; +-- a list of saved archives +-- deprecated +CREATE TABLE archives ( + profileId INTEGER NOT NULL, + key TEXT NOT NULL, -- dat key + + previewMode INTEGER, -- automatically publish changes (0) or write to local folder (1) + localSyncPath TEXT, -- custom local folder that the data is synced to --- crawled media tags -CREATE TABLE crawl_media_tags ( - crawlMediaId INTEGER, - crawlTagId INTEGER, + isSaved INTEGER, -- is this archive saved to our library? + hidden INTEGER DEFAULT 0, -- should this archive be hidden in the library or select-archive modals? (this is useful for internal dats, such as drafts) + networked INTEGER DEFAULT 1, -- join the swarm (1) or do not swarm (0) + autoDownload INTEGER DEFAULT 1, -- watch and download all available data (1) or sparsely download on demand (0) + autoUpload INTEGER DEFAULT 1, -- join the swarm at startup (1) or only swarm when visiting (0) + expiresAt INTEGER, -- change autoUpload to 0 at this time (used for temporary seeding) + createdAt INTEGER DEFAULT (strftime('%s', 'now')), - FOREIGN KEY (crawlMediaId) REFERENCES crawl_media (id) ON DELETE CASCADE, - FOREIGN KEY (crawlTagId) REFERENCES crawl_tags (id) ON DELETE CASCADE + localPath TEXT, -- deprecated + autoPublishLocal INTEGER DEFAULT 0 -- deprecated -- watch localSyncPath and automatically publish changes (1) or not (0) ); -- a list of the draft-dats for a master-dat @@ -426,6 +348,19 @@ CREATE TABLE archive_drafts ( FOREIGN KEY (profileId) REFERENCES profiles (id) ON DELETE CASCADE ); +-- list of the users current templates +-- deprecated +CREATE TABLE templates ( + profileId INTEGER, + url TEXT NOT NULL, + title TEXT, + screenshot, + createdAt INTEGER DEFAULT (strftime('%s', 'now')), + + PRIMARY KEY (profileId, url), + FOREIGN KEY (profileId) REFERENCES profiles (id) ON DELETE CASCADE +); + -- list of the users installed apps -- deprecated CREATE TABLE apps ( @@ -463,18 +398,123 @@ CREATE TABLE workspaces ( FOREIGN KEY (profileId) REFERENCES profiles (id) ON DELETE CASCADE ); +-- deprecated +-- crawled descriptions of other sites +CREATE TABLE crawl_site_descriptions ( + crawlSourceId INTEGER NOT NULL, + crawledAt INTEGER, + + url TEXT, + title TEXT, + description TEXT, + type TEXT, -- comma separated strings + + FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE +); +CREATE VIRTUAL TABLE crawl_site_descriptions_fts_index USING fts5(title, description, content='crawl_site_descriptions'); + +-- deprecated +-- triggers to keep 
crawl_site_descriptions_fts_index updated +CREATE TRIGGER crawl_site_descriptions_ai AFTER INSERT ON crawl_site_descriptions BEGIN + INSERT INTO crawl_site_descriptions_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); +END; +CREATE TRIGGER crawl_site_descriptions_ad AFTER DELETE ON crawl_site_descriptions BEGIN + INSERT INTO crawl_site_descriptions_fts_index(crawl_site_descriptions_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); +END; +CREATE TRIGGER crawl_site_descriptions_au AFTER UPDATE ON crawl_site_descriptions BEGIN + INSERT INTO crawl_site_descriptions_fts_index(crawl_site_descriptions_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); + INSERT INTO crawl_site_descriptions_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); +END; + +-- deprecated +-- crawled media +CREATE TABLE crawl_media ( + id INTEGER PRIMARY KEY, + crawlSourceId INTEGER NOT NULL, + pathname TEXT NOT NULL, + crawledAt INTEGER, + + subtype TEXT NOT NULL, + href TEXT NOT NULL, + title TEXT NOT NULL, + description TEXT, + createdAt INTEGER, + updatedAt INTEGER, + + FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE +); +CREATE INDEX crawl_media_url ON crawl_media (crawlSourceId, pathname); +CREATE INDEX crawl_media_subtype ON crawl_media (subtype); +CREATE INDEX crawl_media_href ON crawl_media (href); +CREATE VIRTUAL TABLE crawl_media_fts_index USING fts5(title, description, content='crawl_media'); + +-- deprecated +-- triggers to keep crawl_media_fts_index updated +CREATE TRIGGER crawl_media_ai AFTER INSERT ON crawl_media BEGIN + INSERT INTO crawl_media_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); +END; +CREATE TRIGGER crawl_media_ad AFTER DELETE ON crawl_media BEGIN + INSERT INTO crawl_media_fts_index(crawl_media_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); +END; +CREATE TRIGGER crawl_media_au AFTER UPDATE ON crawl_media BEGIN + INSERT INTO crawl_media_fts_index(crawl_media_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description); + INSERT INTO crawl_media_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description); +END; + +-- deprecated +-- crawled media tags +CREATE TABLE crawl_media_tags ( + crawlMediaId INTEGER, + crawlTagId INTEGER, + + FOREIGN KEY (crawlMediaId) REFERENCES crawl_media (id) ON DELETE CASCADE, + FOREIGN KEY (crawlTagId) REFERENCES crawl_tags (id) ON DELETE CASCADE +); + +-- deprecated +-- crawled discussions +CREATE TABLE crawl_discussions ( + id INTEGER PRIMARY KEY, + crawlSourceId INTEGER NOT NULL, + pathname TEXT NOT NULL, + crawledAt INTEGER, + + title TEXT NOT NULL, + body TEXT, + href TEXT, + createdAt INTEGER, + updatedAt INTEGER, + + FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE +); +CREATE INDEX crawl_discussions_url ON crawl_discussions (crawlSourceId, pathname); +CREATE VIRTUAL TABLE crawl_discussions_fts_index USING fts5(title, body, content='crawl_discussions'); + +-- deprecated +-- triggers to keep crawl_discussions_fts_index updated +CREATE TRIGGER crawl_discussions_ai AFTER INSERT ON crawl_discussions BEGIN + INSERT INTO crawl_discussions_fts_index(rowid, title, body) VALUES (new.rowid, new.title, new.body); +END; +CREATE TRIGGER crawl_discussions_ad AFTER DELETE ON crawl_discussions BEGIN + INSERT INTO 
crawl_discussions_fts_index(crawl_discussions_fts_index, rowid, title, body) VALUES('delete', old.rowid, old.title, old.body);
+END;
+CREATE TRIGGER crawl_discussions_au AFTER UPDATE ON crawl_discussions BEGIN
+  INSERT INTO crawl_discussions_fts_index(crawl_discussions_fts_index, rowid, title, body) VALUES('delete', old.rowid, old.title, old.body);
+  INSERT INTO crawl_discussions_fts_index(rowid, title, body) VALUES (new.rowid, new.title, new.body);
+END;
+
+-- deprecated
+-- crawled discussion tags
+CREATE TABLE crawl_discussions_tags (
+  crawlDiscussionId INTEGER,
+  crawlTagId INTEGER,
+
+  FOREIGN KEY (crawlDiscussionId) REFERENCES crawl_discussions (id) ON DELETE CASCADE,
+  FOREIGN KEY (crawlTagId) REFERENCES crawl_tags (id) ON DELETE CASCADE
+);
+
-- default profile
INSERT INTO profiles (id) VALUES (0);
--- default bookmarks
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Beaker Browser', 'dat://beakerbrowser.com', 1);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Dat Project', 'dat://datproject.org', 0);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Hashbase', 'https://hashbase.io', 0);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Documentation', 'dat://beakerbrowser.com/docs', 1);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Report an issue', 'https://github.com/beakerbrowser/beaker/issues', 0);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Support Beaker', 'https://opencollective.com/beaker', 1);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Library', 'beaker://library/', 1);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Beaker.Social', 'dat://beaker.social', 1);
-
-PRAGMA user_version = 36;
+PRAGMA user_version = 42;
`
diff --git a/dbs/schemas/profile-data.v1.sql.js b/dbs/schemas/profile-data.v1.sql.js
index f22c43dc..ed2a8049 100644
--- a/dbs/schemas/profile-data.v1.sql.js
+++ b/dbs/schemas/profile-data.v1.sql.js
@@ -17,7 +17,7 @@ CREATE TABLE archives_meta (
  key TEXT PRIMARY KEY,
  title TEXT,
  description TEXT,
-  forkOf TEXT, -- deprecated
+  forkOf TEXT,
  createdByUrl TEXT, -- deprecated
  createdByTitle TEXT, -- deprecated
  mtime INTEGER,
@@ -58,13 +58,5 @@ CREATE UNIQUE INDEX visits_stats_url ON visit_stats (url);
-- default profile
INSERT INTO profiles (id) VALUES (0);
--- default bookmarks
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Beaker Browser', 'dat://beakerbrowser.com', 1);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Dat Project', 'dat://datproject.org', 0);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Hashbase', 'https://hashbase.io', 0);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Documentation', 'dat://beakerbrowser.com/docs', 1);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Report an issue', 'https://github.com/beakerbrowser/beaker/issues', 0);
-INSERT INTO bookmarks (profileId, title, url, pinned) VALUES (0, 'Support Beaker', 'https://opencollective.com/beaker', 1);
-
PRAGMA user_version = 1;
`
diff --git a/dbs/schemas/profile-data.v37.sql.js b/dbs/schemas/profile-data.v37.sql.js
new file mode 100644
index 00000000..25f27475
--- /dev/null
+++ b/dbs/schemas/profile-data.v37.sql.js
@@ -0,0 +1,6 @@
+module.exports = `
+
+ALTER TABLE crawl_sources ADD COLUMN isPrivate INTEGER;
+
+PRAGMA user_version = 37;
+`
\ No newline at end of file
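Each of the new `profile-data.vNN.sql.js` modules exports a single SQL string that ends by bumping `PRAGMA user_version`. A minimal sketch of how such versioned modules are typically applied is shown below; the `migrate` helper, the `db` handle, and the require paths (assumed to sit in `dbs/`) are illustrative assumptions, not the actual runner in `dbs/profile-data-db.js`.

```js
// Sketch only: assumes `db` is a node-sqlite3 Database and that each vNN
// module exports one SQL string ending in `PRAGMA user_version = NN;`.
const migrations = [
  require('./schemas/profile-data.v37.sql.js'),
  require('./schemas/profile-data.v38.sql.js'),
  require('./schemas/profile-data.v39.sql.js'),
  require('./schemas/profile-data.v40.sql.js'),
  require('./schemas/profile-data.v41.sql.js'),
  require('./schemas/profile-data.v42.sql.js')
]
const BASE_VERSION = 36 // version that precedes the first migration above

function migrate (db, cb) {
  db.get('PRAGMA user_version', (err, row) => {
    if (err) return cb(err)
    // fresh databases would get the full profile-data.sql.js schema instead;
    // existing databases run only the migrations newer than their version
    const pending = migrations.slice(Math.max(0, row.user_version - BASE_VERSION))
    const next = () => {
      const sql = pending.shift()
      if (!sql) return cb()
      db.exec(sql, err => err ? cb(err) : next())
    }
    next()
  })
}
```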
diff --git a/dbs/schemas/profile-data.v38.sql.js b/dbs/schemas/profile-data.v38.sql.js
new file mode 100644
index 00000000..f7a64ec0
--- /dev/null
+++ b/dbs/schemas/profile-data.v38.sql.js
@@ -0,0 +1,6 @@
+module.exports = `
+
+ALTER TABLE archives_meta ADD COLUMN author TEXT;
+
+PRAGMA user_version = 38;
+`
\ No newline at end of file
diff --git a/dbs/schemas/profile-data.v39.sql.js b/dbs/schemas/profile-data.v39.sql.js
new file mode 100644
index 00000000..dedfae79
--- /dev/null
+++ b/dbs/schemas/profile-data.v39.sql.js
@@ -0,0 +1,34 @@
+module.exports = `
+DROP TRIGGER crawl_posts_ai;
+DROP TRIGGER crawl_posts_ad;
+DROP TRIGGER crawl_posts_au;
+DROP TABLE IF EXISTS crawl_posts;
+
+-- crawled statuses
+CREATE TABLE crawl_statuses (
+  crawlSourceId INTEGER NOT NULL,
+  pathname TEXT NOT NULL,
+  crawledAt INTEGER,
+
+  body TEXT,
+  createdAt INTEGER,
+  updatedAt INTEGER,
+
+  FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE
+);
+CREATE VIRTUAL TABLE crawl_statuses_fts_index USING fts5(body, content='crawl_statuses');
+
+-- triggers to keep crawl_statuses_fts_index updated
+CREATE TRIGGER crawl_statuses_ai AFTER INSERT ON crawl_statuses BEGIN
+  INSERT INTO crawl_statuses_fts_index(rowid, body) VALUES (new.rowid, new.body);
+END;
+CREATE TRIGGER crawl_statuses_ad AFTER DELETE ON crawl_statuses BEGIN
+  INSERT INTO crawl_statuses_fts_index(crawl_statuses_fts_index, rowid, body) VALUES('delete', old.rowid, old.body);
+END;
+CREATE TRIGGER crawl_statuses_au AFTER UPDATE ON crawl_statuses BEGIN
+  INSERT INTO crawl_statuses_fts_index(crawl_statuses_fts_index, rowid, body) VALUES('delete', old.rowid, old.body);
+  INSERT INTO crawl_statuses_fts_index(rowid, body) VALUES (new.rowid, new.body);
+END;
+
+PRAGMA user_version = 39;
+`
\ No newline at end of file
diff --git a/dbs/schemas/profile-data.v40.sql.js b/dbs/schemas/profile-data.v40.sql.js
new file mode 100644
index 00000000..0f9ad539
--- /dev/null
+++ b/dbs/schemas/profile-data.v40.sql.js
@@ -0,0 +1,21 @@
+module.exports = `
+
+DROP INDEX IF EXISTS crawl_reactions_topic;
+DROP TABLE IF EXISTS crawl_reactions;
+
+-- crawled reactions
+CREATE TABLE crawl_reactions (
+  crawlSourceId INTEGER NOT NULL,
+  pathname TEXT NOT NULL,
+  crawledAt INTEGER,
+
+  topic TEXT NOT NULL,
+  phrases TEXT NOT NULL,
+
+  PRIMARY KEY (crawlSourceId, pathname),
+  FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE
+);
+CREATE INDEX crawl_reactions_topic ON crawl_reactions (topic);
+
+PRAGMA user_version = 40;
+`
\ No newline at end of file
diff --git a/dbs/schemas/profile-data.v41.sql.js b/dbs/schemas/profile-data.v41.sql.js
new file mode 100644
index 00000000..c5347d41
--- /dev/null
+++ b/dbs/schemas/profile-data.v41.sql.js
@@ -0,0 +1,21 @@
+module.exports = `
+
+ALTER TABLE archives_meta ADD COLUMN type TEXT;
+DROP TABLE IF EXISTS archives_meta_type;
+
+-- crawled dats
+CREATE TABLE crawl_dats (
+  crawlSourceId INTEGER NOT NULL,
+  crawledAt INTEGER,
+
+  key TEXT NOT NULL,
+  title TEXT,
+  description TEXT,
+  type TEXT,
+
+  PRIMARY KEY (crawlSourceId, key),
+  FOREIGN KEY (crawlSourceId) REFERENCES crawl_sources (id) ON DELETE CASCADE
+);
+
+PRAGMA user_version = 41;
+`
\ No newline at end of file
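After v38 and v41, `author` and `type` live directly on `archives_meta` and the old `archives_meta_type` join table is gone, so archive metadata can be read in a single row lookup. The sketch below assumes a `db.get(sql, params)` helper shaped like the profile-data db module used elsewhere in this changeset, and a file sitting in `dbs/`; it is not code from this diff.

```js
// Sketch: reading the denormalized archive metadata after v38/v41.
const db = require('./profile-data-db') // assumed helper with get(sql, params)

async function getArchiveMeta (key) {
  // `type` and `author` are now columns on archives_meta itself,
  // rather than being joined in from archives_meta_type
  return db.get(`
    SELECT key, title, description, type, author, forkOf, mtime, size, isOwner
      FROM archives_meta
     WHERE key = ?
  `, [key])
}
```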
diff --git a/dbs/schemas/profile-data.v42.sql.js b/dbs/schemas/profile-data.v42.sql.js
new file mode 100644
index 00000000..73f606d8
--- /dev/null
+++ b/dbs/schemas/profile-data.v42.sql.js
@@ -0,0 +1,32 @@
+module.exports = `
+
+CREATE VIRTUAL TABLE crawl_dats_fts_index USING fts5(title, description, content='crawl_dats');
+
+-- triggers to keep crawl_dats_fts_index updated
+CREATE TRIGGER crawl_dats_ai AFTER INSERT ON crawl_dats BEGIN
+  INSERT INTO crawl_dats_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description);
+END;
+CREATE TRIGGER crawl_dats_ad AFTER DELETE ON crawl_dats BEGIN
+  INSERT INTO crawl_dats_fts_index(crawl_dats_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description);
+END;
+CREATE TRIGGER crawl_dats_au AFTER UPDATE ON crawl_dats BEGIN
+  INSERT INTO crawl_dats_fts_index(crawl_dats_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description);
+  INSERT INTO crawl_dats_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description);
+END;
+
+CREATE VIRTUAL TABLE archives_meta_fts_index USING fts5(title, description, content='archives_meta');
+
+-- triggers to keep archives_meta_fts_index updated
+CREATE TRIGGER archives_meta_ai AFTER INSERT ON archives_meta BEGIN
+  INSERT INTO archives_meta_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description);
+END;
+CREATE TRIGGER archives_meta_ad AFTER DELETE ON archives_meta BEGIN
+  INSERT INTO archives_meta_fts_index(archives_meta_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description);
+END;
+CREATE TRIGGER archives_meta_au AFTER UPDATE ON archives_meta BEGIN
+  INSERT INTO archives_meta_fts_index(archives_meta_fts_index, rowid, title, description) VALUES('delete', old.rowid, old.title, old.description);
+  INSERT INTO archives_meta_fts_index(rowid, title, description) VALUES (new.rowid, new.title, new.description);
+END;
+
+PRAGMA user_version = 42;
+`
\ No newline at end of file
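The v42 migration sets up two external-content FTS5 indexes (`crawl_dats_fts_index` and `archives_meta_fts_index`) whose triggers mirror every insert, delete, and update into the index, using FTS5's special 'delete' command to drop stale rows. Searching such an index means matching against the FTS table and joining back to the content table by rowid, roughly as sketched below; the `db.all(sql, params)` helper is an assumption, not code from this changeset.

```js
// Sketch: full-text search over the new external-content FTS5 index.
const db = require('./profile-data-db') // assumed helper with all(sql, params)

async function searchArchives (query) {
  // the FTS table only stores the indexed text; joining on rowid
  // pulls the full rows back out of archives_meta
  return db.all(`
    SELECT archives_meta.*
      FROM archives_meta_fts_index
      JOIN archives_meta ON archives_meta.rowid = archives_meta_fts_index.rowid
     WHERE archives_meta_fts_index MATCH ?
     ORDER BY rank
  `, [query])
}
```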
diff --git a/dbs/sitedata.js b/dbs/sitedata.js
index becfb0e4..48fd5afb 100644
--- a/dbs/sitedata.js
+++ b/dbs/sitedata.js
@@ -32,12 +32,14 @@ exports.setup = function (opts) {
 * @param {number | string} value
 * @param {Object} [opts]
 * @param {boolean} [opts.dontExtractOrigin]
+ * @param {boolean} [opts.normalizeUrl]
 * @returns {Promise}
 */
const set = exports.set = async function (url, key, value, opts) {
  await setupPromise
  var origin = opts && opts.dontExtractOrigin ? url : await extractOrigin(url)
  if (!origin) return null
+  if (opts && opts.normalizeUrl) origin = normalizeUrl(origin)
  return cbPromise(cb => {
    db.run(`
      INSERT OR REPLACE
@@ -68,12 +70,14 @@ const clear = exports.clear = async function (url, key) {
 * @param {string} key
 * @param {Object} [opts]
 * @param {boolean} [opts.dontExtractOrigin]
+ * @param {boolean} [opts.normalizeUrl]
 * @returns {Promise}
 */
const get = exports.get = async function (url, key, opts) {
  await setupPromise
  var origin = opts && opts.dontExtractOrigin ? url : await extractOrigin(url)
  if (!origin) return null
+  if (opts && opts.normalizeUrl) origin = normalizeUrl(origin)
  return cbPromise(cb => {
    db.get(`SELECT value FROM sitedata WHERE origin = ? AND key = ?`, [origin, key], (err, res) => {
      if (err) return cb(err)
@@ -193,6 +197,18 @@ async function extractOrigin (originURL) {
  return (urlp.protocol + urlp.host)
}

+/**
+ * @param {string} originURL
+ * @returns {string}
+ */
+function normalizeUrl (originURL) {
+  try {
+    var urlp = new URL(originURL)
+    return (urlp.protocol + '//' + urlp.hostname + urlp.pathname).replace(/([/]$)/g, '')
+  } catch (e) {}
+  return originURL
+}
+
migrations = [
  // version 1
  // - includes favicons for default bookmarks
diff --git a/dbs/templates.js b/dbs/templates.js
deleted file mode 100644
index 61a9e387..00000000
--- a/dbs/templates.js
+++ /dev/null
@@ -1,69 +0,0 @@
-const db = require('./profile-data-db')
-
-// typedefs
-// =
-
-/**
- * @typedef {Object} Template
- * @prop {string} url
- * @prop {string} title
- * @prop {number} createdAt
- *
- * @typedef {Object} TemplateScreenshot
- * @prop {string} url
- * @prop {string} screenshot
- */
-
-// exported api
-// =
-
-/**
- * @param {number} profileId
- * @param {string} url
- * @returns {Promise