refactor: on-demand remote pin status checks #1903

Merged · 4 commits · Feb 11, 2022
168 changes: 96 additions & 72 deletions package-lock.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion public/locales/en/notify.json
@@ -4,7 +4,7 @@
"ipfsInvalidApiAddress": "The provided IPFS API address is invalid.",
"ipfsConnectSuccess": "Successfully connected to the IPFS API address",
"ipfsConnectFail": "Unable to connect to the provided IPFS API address",
"ipfsPinFail": "Unable to set pinning. Try again, or see the browser console for more info.",
"ipfsPinFailReason": "Unable to set pinning at {serviceName}: {errorMsg}",
"ipfsIsBack": "Normal IPFS service has resumed. Enjoy!",
"folderExists": "An item with that name already exists. Please choose another.",
"filesFetchFailed": "Failed to get those files. Please check the path and try again.",
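Note on the new string: the {serviceName} and {errorMsg} placeholders are filled from the options object passed to `t()` at render time (the app's i18next setup accepts single-brace placeholders, as the string format suggests). A minimal sketch with made-up values:

  // Sketch, not app code: 'Pinata' and the error text are illustrative.
  const msg = t('ipfsPinFailReason', {
    serviceName: 'Pinata',
    errorMsg: 'Error: 429 Too Many Requests'
  })
  // msg === 'Unable to set pinning at Pinata: Error: 429 Too Many Requests'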
15 changes: 0 additions & 15 deletions src/bundles/files/actions.js
@@ -559,21 +559,6 @@ const actions = () => ({
*/
doFilesClear: () => send({ type: ACTIONS.CLEAR_ALL }),

-/**
- * Gets total size of the local pins. On successful completion `state.mfsSize` will get
- * updated.
- */
-doPinsStatsGet: () => perform(ACTIONS.PINS_SIZE_GET, async (ipfs) => {
-const pinsSize = -1 // TODO: right now calculating size of all pins is too expensive (requires ipfs.files.stat per CID)
-let numberOfPins = 0
-
-for await (const _ of ipfs.pin.ls({ type: 'recursive' })) { // eslint-disable-line no-unused-vars
-numberOfPins++
-}
-
-return { pinsSize, numberOfPins }
-}),

/**
* Gets size of the MFS. On successful completion `state.mfsSize` will get
* updated.
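Context for the removal: counting pins already required streaming the full recursive pin list, and sizing them would add one `ipfs.files.stat` round-trip per CID, which is why `pinsSize` was hard-coded to -1. A sketch of what full sizing would have cost (not part of this PR; `ipfs` is an IPFS HTTP client instance):

  // Hypothetical sizing loop: one extra HTTP API call per pinned CID,
  // i.e. O(number of pins) round-trips on every stats refresh.
  let pinsSize = 0
  for await (const { cid } of ipfs.pin.ls({ type: 'recursive' })) {
    const { cumulativeSize } = await ipfs.files.stat(`/ipfs/${cid}`)
    pinsSize += cumulativeSize
  }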
13 changes: 11 additions & 2 deletions src/bundles/notify.js
@@ -69,8 +69,17 @@ const notify = {
eventId: `experimentsErrors.${action.payload.key}`
}
}
+if (action.type === 'IPFS_PIN_FAILED') {
+return {
+...state,
+show: true,
+error: true,
+msgArgs: action.msgArgs,
+eventId: action.type
+}
+}

-if (action.type === 'IPFS_CONNECT_FAILED' || action.type === 'IPFS_PIN_FAILED') {
+if (action.type === 'IPFS_CONNECT_FAILED') {
return {
...state,
show: true,
@@ -119,7 +128,7 @@ const notify = {
return 'ipfsInvalidApiAddress'
}
if (eventId === 'IPFS_PIN_FAILED') {
-return 'ipfsPinFail'
+return 'ipfsPinFailReason'
}

if (eventId === 'FILES_EVENT_FAILED') {
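The reducer change gives IPFS_PIN_FAILED its own branch so the per-failure `msgArgs` survive into state, and the eventId lookup maps the event to the new `ipfsPinFailReason` string. The action shape the reducer now expects, as dispatched by the pinning bundle later in this diff (values are examples):

  dispatch({
    type: 'IPFS_PIN_FAILED',
    msgArgs: { serviceName: 'local', errorMsg: 'Error: pin failed' } // example values
  })
  // notify state becomes { ...state, show: true, error: true, msgArgs, eventId: 'IPFS_PIN_FAILED' }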
136 changes: 76 additions & 60 deletions src/bundles/pinning.js
@@ -1,22 +1,33 @@
// @ts-check
import { pinningServiceTemplates } from '../constants/pinning'
import memoize from 'p-memoize'
+import CID from 'cids'

+// This bundle leverages createCacheBundle and persistActions for
+// the persistence layer that keeps pins in IndexDB store
+// to ensure they are around across restarts/reloads/refactors/releases.

+const CID_PIN_CHECK_BATCH_SIZE = 10 // Pinata returns error when >10

+// id = `${serviceName}:${cid}`
+const cacheId2Cid = (id) => id.split(':').at(-1)
+const cacheId2ServiceName = (id) => id.split(':').at(0)
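// Worked example (cache id is illustrative): for 'pinata:bafybeig...'
//   cacheId2Cid('pinata:bafybeig...')         // -> 'bafybeig...'
//   cacheId2ServiceName('pinata:bafybeig...') // -> 'pinata'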

const parseService = async (service, remoteServiceTemplates, ipfs) => {
const template = remoteServiceTemplates.find(t => service.endpoint.toString() === t.apiEndpoint.toString())
const icon = template?.icon
const visitServiceUrl = template?.visitServiceUrl
-const autoUpload = await mfsPolicyEnableFlag(service.service, ipfs)
-const parsedService = { ...service, name: service.service, icon, visitServiceUrl, autoUpload }
+const parsedService = { ...service, name: service.service, icon, visitServiceUrl }

+if (service?.stat?.status === 'invalid') {
+return { ...parsedService, numberOfPins: -1, online: false }
+}

const numberOfPins = service.stat?.pinCount?.pinned
const online = typeof numberOfPins === 'number'
+const autoUpload = online ? await mfsPolicyEnableFlag(service.service, ipfs) : undefined

-return { ...parsedService, numberOfPins, online }
+return { ...parsedService, numberOfPins, online, autoUpload }
}
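// Summary of the three shapes parseService can now return:
//   stat.status === 'invalid'          -> { ..., numberOfPins: -1, online: false }
//   stat.pinCount.pinned is a number   -> { ..., numberOfPins, online: true, autoUpload }
//   no stat present (plain service.ls) -> { ..., numberOfPins: undefined, online: false, autoUpload: undefined }
// Note the MFS policy lookup (an extra RPC) now runs only for online services.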

const mfsPolicyEnableFlag = memoize(async (serviceName, ipfs) => {
@@ -43,35 +54,26 @@ const uniqueCidBatches = (arrayOfCids, size) => {
return result
}

-/**
- * TODO: This might change, current version from: https://github.com/ipfs/go-ipfs/blob/petar/pincli/core/commands/remotepin.go#L53
- * @typedef {Object} RemotePin
- * @property {string} id
- * @property {string} name
- * @property {('queued'|'pinning'|'pinned'|'failed')} status
- * @property {string} cid
- * @property {Array<string>} [delegates] (multiaddrs endind with /p2p/peerid)
- */
const pinningBundle = {
name: 'pinning',
+persistActions: ['UPDATE_REMOTE_PINS'],
reducer: (state = {
pinningServices: [],
remotePins: [],
notRemotePins: [],
arePinningServicesSupported: false
}, action) => {
-if (action.type === 'CACHE_REMOTE_PINS') {
-const { adds, removals } = action.payload
-const remotePins = [...state.remotePins, ...adds].filter(p => !removals.some(r => r === p.id))
-const notRemotePins = [...state.notRemotePins, ...removals].filter(rid => !adds.some(a => a.id === rid))
+if (action.type === 'UPDATE_REMOTE_PINS') {
+const { adds = [], removals = [] } = action.payload
+const uniq = (arr) => [...new Set(arr)]
+const remotePins = uniq([...state.remotePins, ...adds].filter(p => !removals.some(r => r === p)))
+const notRemotePins = uniq([...state.notRemotePins, ...removals].filter(p => !adds.some(a => a === p)))
return { ...state, remotePins, notRemotePins }
}
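// Worked example of this branch (cache ids are illustrative):
//   state:  remotePins = ['pinata:bafyA'], notRemotePins = ['pinata:bafyB']
//   action: { adds: ['pinata:bafyB'], removals: ['pinata:bafyA'] }
//   next:   remotePins = ['pinata:bafyB'], notRemotePins = ['pinata:bafyA']
// A cache id can migrate between the two lists, and uniq() collapses duplicates
// that repeated status checks would otherwise accumulate.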
if (action.type === 'SET_REMOTE_PINNING_SERVICES') {
const oldServices = state.pinningServices
const newServices = action.payload
// Skip update when list length did not change and new one has no stats
// so there is no janky update in 'Set pinning modal' when 3+ services
// are defined and some of them are offline.
if (oldServices.length === newServices.length) {
const withPinStats = s => (s && typeof s.numberOfPins !== 'undefined')
const oldStats = oldServices.some(withPinStats)
@@ -86,73 +88,74 @@ const pinningBundle = {
return state
},

-doFetchRemotePins: (files) => async ({ dispatch, store, getIpfs }) => {
-// Only check services that are confirmed to be online
-const pinningServices = store.selectPinningServices().filter(s => s.online)
+doFetchRemotePins: (files, skipCache = false) => async ({ dispatch, store, getIpfs }) => {
+const pinningServices = store.selectPinningServices()
if (!pinningServices?.length) return

const ipfs = getIpfs()

if (!ipfs || store?.ipfs?.ipfs?.ready || !ipfs.pin.remote) return

-const allCids = files ? files.map(f => f.cid) : []
+const allCids = files ? files.map(f => f.cid.toString()) : []

// Reuse known state for some CIDs to avoid unnecessary requests
-const cacheId2Cid = (id) => id.split(':').slice(-1)[0]
-const remotePins = store.selectRemotePins().map(pin => pin.id)
+const remotePins = store.selectRemotePins()
const notRemotePins = store.selectNotRemotePins()

-// Check remaining CID status in chunks of 10 (remote API limitation)
-const cids = uniqueCidBatches(allCids, 10)
+// Check remaining CID status in chunks based on API limitation seen in real world
+const cids = uniqueCidBatches(allCids, CID_PIN_CHECK_BATCH_SIZE)
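// Illustration, assuming uniqueCidBatches dedupes and then chunks (as its call
// site implies): uniqueCidBatches(['a', 'b', 'a', 'c'], 2) -> [['a', 'b'], ['c']]
// Each inner array becomes one pin.remote.ls request per service below.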

const adds = []
const removals = []

await Promise.allSettled(pinningServices.map(async service => {
try {
// skip CIDs that we know the state of at this service
-const skipCids = new Set(
+const skipCids = skipCache ? new Set() : new Set(
[...remotePins, ...notRemotePins]
.filter(id => id.startsWith(service.name))
.map(cacheId2Cid)
)
-return Promise.allSettled(cids.map(async cidChunk => {
+for (const cidChunk of cids) {
const cidsToCheck = cidChunk.filter(cid => !skipCids.has(cid.toString()))
-if (!cidsToCheck.length) return // skip if no new cids to check
+if (!cidsToCheck.length) continue // skip if no new cids to check
const notPins = new Set(cidsToCheck.map(cid => cid.toString()))
-const pins = ipfs.pin.remote.ls({ service: service.name, cid: cidsToCheck })
-for await (const pin of pins) {
-const pinCid = pin.cid.toString()
-notPins.delete(pinCid)
-adds.push({ id: `${service.name}:${pinCid}`, ...pin })
+try {
+/* TODO: wrap pin.remote.*calls with progressive backoff when response Type == "error" and Message includes "429 Too Many Requests"
+* and see if we could make go-ipfs include Retry-After header in payload description for this type of error */
+const pins = ipfs.pin.remote.ls({ service: service.name, cid: cidsToCheck.map(cid => new CID(cid)) })
+for await (const pin of pins) {
+const pinCid = pin.cid.toString()
+notPins.delete(pinCid)
+adds.push(`${service.name}:${pinCid}`)
+}
+// store 'not pinned remotely on this service' to avoid future checks
+} catch (e) {
+console.error(`Error: pin.remote.ls service=${service.name} cid=${cidsToCheck}: ${e.toString()}`)
+}
-// store 'not pinned remotely on this service' to avoid future checks
+// cache remaining ones as not pinned
for (const notPinCid of notPins) {
removals.push(`${service.name}:${notPinCid}`)
}
-}))
+}
} catch (e) {
// ignore service and network errors for now
// and continue checking remaining ones
console.error('unexpected error during doFetchRemotePins', e)
}
}))
-dispatch({ type: 'CACHE_REMOTE_PINS', payload: { adds, removals } })
+dispatch({ type: 'UPDATE_REMOTE_PINS', payload: { adds, removals } })
},

selectRemotePins: (state) => state.pinning.remotePins || [],
selectNotRemotePins: (state) => state.pinning.notRemotePins || [],

doSelectRemotePinsForFile: (file) => ({ store }) => {
const pinningServicesNames = store.selectPinningServices().map(remote => remote.name)

-const remotePinForFile = store.selectRemotePins().filter(pin => pin.cid.string === file.cid.string)
-const servicesBeingUsed = remotePinForFile.map(pin => pin.id.split(':')[0]).filter(pinId => pinningServicesNames.includes(pinId))
+const remotePinForFile = store.selectRemotePins().filter(pin => cacheId2Cid(pin) === file.cid.toString())
+const servicesBeingUsed = remotePinForFile.map(pin => cacheId2ServiceName(pin)).filter(name => pinningServicesNames.includes(name))
return servicesBeingUsed
},
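// Worked example (values illustrative): with remotePins = ['pinata:bafyX', 'web3.storage:bafyY']
// and file.cid.toString() === 'bafyX', this returns ['pinata']; services that were
// removed from the config are dropped by the includes() filter.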

+// list of services without online check (reads list from config, should be instant)
doFetchPinningServices: () => async ({ getIpfs, store, dispatch }) => {
const ipfs = getIpfs()
if (!ipfs || store?.ipfs?.ipfs?.ready || !ipfs.pin.remote) return null
Expand All @@ -162,14 +165,23 @@ const pinningBundle = {
if (!isPinRemotePresent) return null

const remoteServiceTemplates = store.selectRemoteServiceTemplates()
-// list of services without online check (should be instant)
const offlineListOfServices = await ipfs.pin.remote.service.ls()
const remoteServices = await Promise.all(offlineListOfServices.map(service => parseService(service, remoteServiceTemplates, ipfs)))
dispatch({ type: 'SET_REMOTE_PINNING_SERVICES', payload: remoteServices })
-// slower list of services + their pin stats (usually slower)
-const fullListOfServices = await ipfs.pin.remote.service.ls({ stat: true })
-const fullRemoteServices = await Promise.all(fullListOfServices.map(service => parseService(service, remoteServiceTemplates, ipfs)))
-dispatch({ type: 'SET_REMOTE_PINNING_SERVICES', payload: fullRemoteServices })
},

+// fetching pin stats for services is slower/expensive, so we only do that on Settings
+doFetchPinningServicesStats: () => async ({ getIpfs, store, dispatch }) => {
+const ipfs = getIpfs()
+if (!ipfs || store?.ipfs?.ipfs?.ready || !ipfs.pin.remote) return null
+const isPinRemotePresent = (await ipfs.commands()).Subcommands.find(c => c.Name === 'pin').Subcommands.some(c => c.Name === 'remote')
+if (!isPinRemotePresent) return null
+
+const remoteServiceTemplates = store.selectRemoteServiceTemplates()
+const servicesWithStats = await ipfs.pin.remote.service.ls({ stat: true })
+const remoteServices = await Promise.all(servicesWithStats.map(service => parseService(service, remoteServiceTemplates, ipfs)))
+
+dispatch({ type: 'SET_REMOTE_PINNING_SERVICES', payload: remoteServices })
+},

selectPinningServices: (state) => state.pinning.pinningServices || [],
@@ -186,54 +198,58 @@
}
}), {}),

-doSetPinning: (pin, services = [], wasLocallyPinned, previousRemotePins = []) => async ({ getIpfs, store, dispatch }) => {
+doSetPinning: (file, services = [], wasLocallyPinned, previousRemotePins = []) => async ({ getIpfs, store, dispatch }) => {
const ipfs = getIpfs()
-const { cid, name } = pin
+const { cid, name } = file

const pinLocally = services.includes('local')
if (wasLocallyPinned !== pinLocally) {
try {
pinLocally ? await ipfs.pin.add(cid) : await ipfs.pin.rm(cid)
} catch (e) {
console.error(`unexpected local pin error for ${cid} (${name})`, e)
-dispatch({ type: 'IPFS_PIN_FAILED' })
+const msgArgs = { serviceName: 'local', errorMsg: e.toString() }
+dispatch({ type: 'IPFS_PIN_FAILED', msgArgs })
}
}

const adds = []
const removals = []

-store.selectPinningServices().filter(s => s.online).forEach(async service => {
+store.selectPinningServices().forEach(async service => {
const shouldPin = services.includes(service.name)
const wasPinned = previousRemotePins.includes(service.name)
if (wasPinned === shouldPin) return

+const id = `${service.name}:${cid}`
try {
-const id = `${service.name}:${pin.cid}`
if (shouldPin) {
-adds.push({ id, ...pin })
-// TODO: remove background:true and add pin job to queue.
-// wait for pinning to finish + add indicator for ongoing pinning
+adds.push(id)
+/* TODO: remove background:true below and add pin job to persisted queue.
+* We want track ongoing pinning across restarts of webui/ipfs-desktop
+* See: https://github.com/ipfs/ipfs-webui/issues/1752 */
await ipfs.pin.remote.add(cid, { service: service.name, name, background: true })
} else {
removals.push(id)
await ipfs.pin.remote.rm({ cid: [cid], service: service.name })
}
} catch (e) {
// log error and continue with other services
-console.error(`unexpected pin.remote error for ${cid}@${service.name}`, e)
-dispatch({ type: 'IPFS_PIN_FAILED' })
+console.error(`ipfs.pin.remote error for ${cid}@${service.name}`, e)
+const msgArgs = { serviceName: service.name, errorMsg: e.toString() }
+dispatch({ type: 'IPFS_PIN_FAILED', msgArgs })
}
})

-dispatch({ type: 'CACHE_REMOTE_PINS', payload: { adds, removals } })
+dispatch({ type: 'UPDATE_REMOTE_PINS', payload: { adds, removals } })

await store.doPinsFetch()
},
doAddPinningService: ({ apiEndpoint, nickname, secretApiKey }) => async ({ getIpfs }) => {
const ipfs = getIpfs()

// temporary mitigation for https://github.com/ipfs/ipfs-webui/issues/1770
+// update: still present a year later – i think there is a lesson here :-)
nickname = nickname.replaceAll('.', '_')

await ipfs.pin.remote.service.add(nickname, {
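Taken together, `doSetPinning` records the `serviceName:cid` ids optimistically, fires the remote add/rm calls, and reports each failure with service-specific `msgArgs`. A sketch of a call site (argument values are illustrative):

  await store.doSetPinning(
    { cid: file.cid, name: file.name }, // file being (un)pinned
    ['local', 'pinata'],                // desired services; 'local' means the node itself
    false,                              // wasLocallyPinned: previous local pin state
    []                                  // previousRemotePins: service names already pinning this CID
  )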
4 changes: 2 additions & 2 deletions src/components/notify/Notify.js
@@ -4,12 +4,12 @@ import { withTranslation } from 'react-i18next'
import Toast from './Toast'

const Notify = ({ t, notify, notifyI18nKey, doNotifyDismiss }) => {
-const { show, error } = notify
+const { show, error, msgArgs } = notify
if (!show) return null

return (
<Toast error={error} onDismiss={doNotifyDismiss}>
-{t(notifyI18nKey)}
+{t(notifyI18nKey, msgArgs)}
</Toast>
)
}
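End to end, the component simply forwards `msgArgs` into the translation, so the toast carries the failing service and error text. With illustrative values:

  // notify = { show: true, error: true, msgArgs: { serviceName: 'Pinata', errorMsg: 'Error: 429' } }
  // notifyI18nKey = 'ipfsPinFailReason'
  // renders: <Toast error>Unable to set pinning at Pinata: Error: 429</Toast>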