From d2ec88ab233a5a36347f865e93ba961e1492e2fb Mon Sep 17 00:00:00 2001 From: Dan Park Date: Sun, 16 Jun 2024 22:40:02 +0900 Subject: [PATCH] Deploy website - based on 087c51350e6de8ef7fd7f36ed09b1dcbdd56b57f --- 404.html | 4 ++-- assets/js/197156b8.124638a4.js | 1 - assets/js/197156b8.28c8d439.js | 1 + ...{runtime~main.80b4a2af.js => runtime~main.7065dc27.js} | 2 +- developers.html | 4 ++-- developers/build-your-contract.html | 4 ++-- developers/build-your-contract/deploy-your-contract.html | 4 ++-- developers/build-your-contract/developer-tools.html | 4 ++-- developers/client-apis.html | 4 ++-- developers/deployed-contracts.html | 4 ++-- developers/differences-from-ethereum.html | 4 ++-- developers/how-can-i-restore-my-account.html | 4 ++-- index.html | 4 ++-- learn.html | 4 ++-- learn/key-features/layered-architecture/ethanos.html | 4 ++-- learn/key-features/layered-architecture/overview.html | 4 ++-- learn/key-features/over-pos/overview.html | 4 ++-- learn/key-features/over-pos/requirements.html | 4 ++-- learn/key-features/over-pos/rewards-and-penalties.html | 4 ++-- learn/key-features/tokenomics/distribution.html | 8 +++++--- learn/key-features/tokenomics/fee.html | 4 ++-- learn/key-features/tokenomics/feedback.html | 4 ++-- learn/key-features/tokenomics/overview.html | 4 ++-- operators.html | 4 ++-- operators/CLI-options/chronos.html | 4 ++-- operators/CLI-options/kairos.html | 4 ++-- operators/faqs.html | 4 ++-- operators/operate-restoration-client.html | 4 ++-- operators/operate-validators.html | 4 ++-- operators/run-a-node.html | 4 ++-- operators/system-requirements.html | 4 ++-- 31 files changed, 61 insertions(+), 59 deletions(-) delete mode 100644 assets/js/197156b8.124638a4.js create mode 100644 assets/js/197156b8.28c8d439.js rename assets/js/{runtime~main.80b4a2af.js => runtime~main.7065dc27.js} (97%) diff --git a/404.html b/404.html index d3e53f8..0418db7 100644 --- a/404.html +++ b/404.html @@ -4,13 +4,13 @@ Page Not Found | OverProtocol Docs - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/197156b8.124638a4.js b/assets/js/197156b8.124638a4.js deleted file mode 100644 index b936517..0000000 --- a/assets/js/197156b8.124638a4.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkover_docs=self.webpackChunkover_docs||[]).push([[579],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>h});var a=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,a)}return r}function o(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var l=a.createContext({}),u=function(e){var t=a.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},d=function(e){var t=u(e.components);return a.createElement(l.Provider,{value:t},e.children)},c="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},p=a.forwardRef((function(e,t){var r=e.components,n=e.mdxType,i=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),c=u(r),p=n,h=c["".concat(l,".").concat(p)]||c[p]||m[p]||i;return r?a.createElement(h,o(o({ref:t},d),{},{components:r})):a.createElement(h,o({ref:t},d))}));function h(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var i=r.length,o=new Array(i);o[0]=p;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[c]="string"==typeof e?e:n,o[1]=s;for(var u=2;u{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>o,default:()=>m,frontMatter:()=>i,metadata:()=>s,toc:()=>u});var a=r(7462),n=(r(7294),r(3905));const i={title:"Token Distribution",description:"An introduction for 
distribution information of OVER",lang:"en"},o=void 0,s={unversionedId:"learn/key-features/tokenomics/distribution",id:"learn/key-features/tokenomics/distribution",title:"Token Distribution",description:"An introduction for distribution information of OVER",source:"@site/docs/learn/key-features/tokenomics/distribution.md",sourceDirName:"learn/key-features/tokenomics",slug:"/learn/key-features/tokenomics/distribution",permalink:"/learn/key-features/tokenomics/distribution",draft:!1,editUrl:"https://github.com/overprotocol/overprotocol.github.io/edit/develop/docs/learn/key-features/tokenomics/distribution.md",tags:[],version:"current",frontMatter:{title:"Token Distribution",description:"An introduction for distribution information of OVER",lang:"en"},sidebar:"learnSidebar",previous:{title:"Tokenomics Overview",permalink:"/learn/key-features/tokenomics/overview"},next:{title:"Deposit and Yield",permalink:"/learn/key-features/tokenomics/feedback"}},l={},u=[{value:"Token Allocation",id:"token-allocation",level:2},{value:"1. Staking Rewards",id:"1-staking-rewards",level:3},{value:"2. DAO Treasury",id:"treasury",level:3},{value:"3. Over Community Access Program(OCAP)",id:"3-over-community-access-programocap",level:3},{value:"4. 
Others",id:"4-others",level:3},{value:"Staking Rewards",id:"staking-rewards",level:2},{value:"Minimum Guaranteed Rewards",id:"minimum-guaranteed-rewards",level:3},{value:"Adjustable Rewards",id:"adjustable-rewards",level:3}],d={toc:u},c="wrapper";function m(e){let{components:t,...r}=e;return(0,n.kt)(c,(0,a.Z)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("admonition",{type:"info"},(0,n.kt)("p",{parentName:"admonition"},(0,n.kt)("strong",{parentName:"p"},"Disclaimer"),": The tokenomics details, including the distribution and allocation mechanisms, are still under development and may be adjusted before the mainnet launch to better serve the evolving needs of the OverProtocol ecosystem.")),(0,n.kt)("p",null,"OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued."),(0,n.kt)("h2",{id:"token-allocation"},"Token Allocation"),(0,n.kt)("h3",{id:"1-staking-rewards"},"1. Staking Rewards"),(0,n.kt)("p",null,"30% of the total tokens, equating to 300 million OVER, will be distributed over 10 years. The issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. The remaining 100 million OVER will be used as an adjustable reward, modulated in real-time by the system without human intervention, based on the desired staking quantity. Further details are described ",(0,n.kt)("a",{parentName:"p",href:"#staking-rewards"},"below"),"."),(0,n.kt)("h3",{id:"treasury"},"2. DAO Treasury"),(0,n.kt)("p",null,"The DAO Treasury is a pivotal component in the sustainable growth and development of the OverProtocol ecosystem, with the goal of supporting DeFi, Layer 2 solutions, and other ecosystem initiatives. It will introduce governance mechanisms allowing participation from node validators, Nethers stakers, and potentially other contributors in deciding the allocation of treasury funds. 
The treasury will be funded by a linear distribution of 10% of the total supply (100 million OVER) over ten years, along with the base fee from transaction fees being allocated to the treasury."),(0,n.kt)("h3",{id:"3-over-community-access-programocap"},"3. Over Community Access Program(OCAP)"),(0,n.kt)("p",null,"Of the total supply, 15% is initially allocated to the OCAP. OCAP facilitates the distribution of OVER in various ways, such as airdrops for early community members and contributors, or through liquidity provision. The goal is to make participation in OverProtocol accessible to those who share our vision."),(0,n.kt)("h3",{id:"4-others"},"4. Others"),(0,n.kt)("p",null,"The remaining 450 Million OVER is earmarked for distribution to 4 entities (Core Contributors, Investors, Over Technologies, and Over Foundation) over a 4-year schedule. Refer to Table \\ref{table:0} for the yearly allocation amounts. Each percentage point indicates the proportion of allocation distribution relative to the total 1 billion OVER."),(0,n.kt)("img",{src:"/img/alloc_chart.png",style:{width:500},alt:"alloc_chart"}),(0,n.kt)("h2",{id:"staking-rewards"},"Staking Rewards"),(0,n.kt)("h3",{id:"minimum-guaranteed-rewards"},"Minimum Guaranteed Rewards"),(0,n.kt)("p",null,"OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch."),(0,n.kt)("h3",{id:"adjustable-rewards"},"Adjustable Rewards"),(0,n.kt)("p",null,"The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. 
These adjustments are made based on the current staking rate, ranging between a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. Refer to ",(0,n.kt)("a",{parentName:"p",href:"/learn/key-features/tokenomics/feedback"},"this page")," for a comprehensive overview of the feedback mechanism."),(0,n.kt)("p",null,"After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers."),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Year"),(0,n.kt)("th",{parentName:"tr",align:null},"Minimum Issuance"),(0,n.kt)("th",{parentName:"tr",align:null},"Maximum Issuance"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Year 1 ~ 10"),(0,n.kt)("td",{parentName:"tr",align:null},"20M OVER"),(0,n.kt)("td",{parentName:"tr",align:null},"30M OVER")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Year 11 ~"),(0,n.kt)("td",{parentName:"tr",align:null},"0 OVER"),(0,n.kt)("td",{parentName:"tr",align:null},"10M OVER")))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/197156b8.28c8d439.js b/assets/js/197156b8.28c8d439.js new file mode 100644 index 0000000..42fb298 --- /dev/null +++ b/assets/js/197156b8.28c8d439.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkover_docs=self.webpackChunkover_docs||[]).push([[579],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>h});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return 
Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),u=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},d=function(e){var t=u(e.components);return n.createElement(l.Provider,{value:t},e.children)},c="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,i=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),c=u(r),p=a,h=c["".concat(l,".").concat(p)]||c[p]||m[p]||i;return r?n.createElement(h,o(o({ref:t},d),{},{components:r})):n.createElement(h,o({ref:t},d))}));function h(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=r.length,o=new Array(i);o[0]=p;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[c]="string"==typeof e?e:a,o[1]=s;for(var u=2;u{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>o,default:()=>m,frontMatter:()=>i,metadata:()=>s,toc:()=>u});var n=r(7462),a=(r(7294),r(3905));const i={title:"Token Distribution",description:"An introduction for distribution information of OVER",lang:"en"},o=void 0,s={unversionedId:"learn/key-features/tokenomics/distribution",id:"learn/key-features/tokenomics/distribution",title:"Token Distribution",description:"An introduction for distribution information of 
OVER",source:"@site/docs/learn/key-features/tokenomics/distribution.md",sourceDirName:"learn/key-features/tokenomics",slug:"/learn/key-features/tokenomics/distribution",permalink:"/learn/key-features/tokenomics/distribution",draft:!1,editUrl:"https://github.com/overprotocol/overprotocol.github.io/edit/develop/docs/learn/key-features/tokenomics/distribution.md",tags:[],version:"current",frontMatter:{title:"Token Distribution",description:"An introduction for distribution information of OVER",lang:"en"},sidebar:"learnSidebar",previous:{title:"Tokenomics Overview",permalink:"/learn/key-features/tokenomics/overview"},next:{title:"Deposit and Yield",permalink:"/learn/key-features/tokenomics/feedback"}},l={},u=[{value:"Token Allocation",id:"token-allocation",level:2},{value:"1. Staking Rewards",id:"1-staking-rewards",level:3},{value:"2. DAO Treasury",id:"treasury",level:3},{value:"3. Over Community Access Program(OCAP)",id:"3-over-community-access-programocap",level:3},{value:"4. Others",id:"4-others",level:3},{value:"Staking Rewards",id:"staking-rewards",level:2},{value:"Minimum Guaranteed Rewards",id:"minimum-guaranteed-rewards",level:3},{value:"Adjustable Rewards",id:"adjustable-rewards",level:3}],d={toc:u},c="wrapper";function m(e){let{components:t,...r}=e;return(0,a.kt)(c,(0,n.Z)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("admonition",{type:"info"},(0,a.kt)("p",{parentName:"admonition"},(0,a.kt)("strong",{parentName:"p"},"Disclaimer"),": The tokenomics details, including the distribution and allocation mechanisms, are still under development and may be adjusted before the mainnet launch to better serve the evolving needs of the OverProtocol ecosystem.")),(0,a.kt)("p",null,"OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued."),(0,a.kt)("h2",{id:"token-allocation"},"Token Allocation"),(0,a.kt)("h3",{id:"1-staking-rewards"},"1. 
Staking Rewards"),(0,a.kt)("p",null,"30% of the total tokens, equating to 300 million OVER, will be distributed over 10 years. The issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. The remaining 100 million OVER will be used as an adjustable reward, modulated in real-time by the system without human intervention, based on the desired staking quantity. Further details are described ",(0,a.kt)("a",{parentName:"p",href:"#staking-rewards"},"below"),"."),(0,a.kt)("h3",{id:"treasury"},"2. DAO Treasury"),(0,a.kt)("p",null,"The DAO Treasury is a pivotal component in the sustainable growth and development of the OverProtocol ecosystem, with the goal of supporting DeFi, Layer 2 solutions, and other ecosystem initiatives. It will introduce governance mechanisms allowing participation from node validators, Nethers stakers, and potentially other contributors in deciding the allocation of treasury funds. The treasury will be funded by a linear distribution of 10% of the total supply (100 million OVER) over ten years, along with the base fee from transaction fees being allocated to the treasury."),(0,a.kt)("h3",{id:"3-over-community-access-programocap"},"3. Over Community Access Program(OCAP)"),(0,a.kt)("p",null,"Of the total supply, 15% is initially allocated to the OCAP. OCAP facilitates the distribution of OVER in various ways, such as airdrops for early community members and contributors, or through liquidity provision. The goal is to make participation in OverProtocol accessible to those who share our vision."),(0,a.kt)("h3",{id:"4-others"},"4. Others"),(0,a.kt)("p",null,"The remaining 450 million OVER tokens are earmarked for distribution to four entities: Core\nContributors, Investors, Over Technologies, and the Over Foundation. 
The distribution will follow a\n2-year schedule, which includes a 6-month cliff and 18 months of linear vesting."),(0,a.kt)("img",{src:"/img/alloc_chart.png",style:{width:500},alt:"alloc_chart"}),(0,a.kt)("h2",{id:"staking-rewards"},"Staking Rewards"),(0,a.kt)("h3",{id:"minimum-guaranteed-rewards"},"Minimum Guaranteed Rewards"),(0,a.kt)("p",null,"OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch."),(0,a.kt)("h3",{id:"adjustable-rewards"},"Adjustable Rewards"),(0,a.kt)("p",null,"The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. These adjustments are made based on the current staking rate, ranging between a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. 
Refer to ",(0,a.kt)("a",{parentName:"p",href:"/learn/key-features/tokenomics/feedback"},"this page")," for a comprehensive overview of the feedback mechanism."),(0,a.kt)("p",null,"After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers."),(0,a.kt)("table",null,(0,a.kt)("thead",{parentName:"table"},(0,a.kt)("tr",{parentName:"thead"},(0,a.kt)("th",{parentName:"tr",align:null},"Year"),(0,a.kt)("th",{parentName:"tr",align:null},"Minimum Issuance"),(0,a.kt)("th",{parentName:"tr",align:null},"Maximum Issuance"))),(0,a.kt)("tbody",{parentName:"table"},(0,a.kt)("tr",{parentName:"tbody"},(0,a.kt)("td",{parentName:"tr",align:null},"Year 1 ~ 10"),(0,a.kt)("td",{parentName:"tr",align:null},"20M OVER"),(0,a.kt)("td",{parentName:"tr",align:null},"30M OVER")),(0,a.kt)("tr",{parentName:"tbody"},(0,a.kt)("td",{parentName:"tr",align:null},"Year 11 ~"),(0,a.kt)("td",{parentName:"tr",align:null},"0 OVER"),(0,a.kt)("td",{parentName:"tr",align:null},"10M OVER")))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.80b4a2af.js b/assets/js/runtime~main.7065dc27.js similarity index 97% rename from assets/js/runtime~main.80b4a2af.js rename to assets/js/runtime~main.7065dc27.js index b5cab61..5c45f4f 100644 --- a/assets/js/runtime~main.80b4a2af.js +++ b/assets/js/runtime~main.7065dc27.js @@ -1 +1 @@ -(()=>{"use strict";var e,t,r,a,o,f={},c={};function n(e){var t=c[e];if(void 0!==t)return t.exports;var r=c[e]={exports:{}};return f[e].call(r.exports,r,r.exports,n),r.exports}n.m=f,e=[],n.O=(t,r,a,o)=>{if(!r){var f=1/0;for(b=0;b=o)&&Object.keys(n.O).every((e=>n.O[e](r[d])))?r.splice(d--,1):(c=!1,o0&&e[b-1][2]>o;b--)e[b]=e[b-1];e[b]=[r,a,o]},n.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return n.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,n.t=function(e,a){if(1&a&&(e=this(e)),8&a)return e;if("object"==typeof 
e&&e){if(4&a&&e.__esModule)return e;if(16&a&&"function"==typeof e.then)return e}var o=Object.create(null);n.r(o);var f={};t=t||[null,r({}),r([]),r(r)];for(var c=2&a&&e;"object"==typeof c&&!~t.indexOf(c);c=r(c))Object.getOwnPropertyNames(c).forEach((t=>f[t]=()=>e[t]));return f.default=()=>e,n.d(o,f),o},n.d=(e,t)=>{for(var r in t)n.o(t,r)&&!n.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},n.f={},n.e=e=>Promise.all(Object.keys(n.f).reduce(((t,r)=>(n.f[r](e,t),t)),[])),n.u=e=>"assets/js/"+({26:"93ff49c2",47:"8c2d38e9",53:"935f2afb",143:"5f02ec4e",169:"6571d487",170:"98fd4a74",206:"a5d2c59e",241:"9ed7639c",382:"8def2dd5",416:"3b6b5936",433:"b97ac028",434:"caac22b6",454:"eff5f417",461:"eff783ef",514:"1be78505",558:"abafa56d",577:"7c85feac",579:"197156b8",633:"0879876d",652:"2b8c0123",803:"7e4ee331",819:"4052b245",822:"63925da8",859:"669e6444",874:"e1a65c49",899:"d4e72995",918:"17896441",971:"c377a04b",974:"72939e70",977:"53c49350",981:"15afe019"}[e]||e)+"."+{26:"496e3cfe",47:"4e01e2c9",53:"5095c349",143:"2c36a1d5",169:"2a82d528",170:"f1b306d4",206:"e5b529de",241:"2766f086",382:"25cc2ffb",416:"ba196860",433:"6ed771ce",434:"a20ff607",454:"359d1b94",461:"dc970584",514:"76395081",558:"58f9a41e",577:"b398bf13",579:"124638a4",633:"a8b13e54",652:"7297d5c0",803:"0cd542b4",819:"9852c472",822:"80398a88",859:"6a0f87d4",874:"55b01fa4",899:"41131e9c",918:"7ad503a5",971:"ccfee8f6",972:"50480502",974:"15a7ef9f",977:"0dbf2de6",981:"3578b96e"}[e]+".js",n.miniCssF=e=>{},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),a={},o="over-docs:",n.l=(e,t,r,f)=>{if(a[e])a[e].push(t);else{var c,d;if(void 0!==r)for(var i=document.getElementsByTagName("script"),b=0;b{c.onerror=c.onload=null,clearTimeout(s);var o=a[e];if(delete a[e],c.parentNode&&c.parentNode.removeChild(c),o&&o.forEach((e=>e(r))),t)return 
t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:c}),12e4);c.onerror=l.bind(null,c.onerror),c.onload=l.bind(null,c.onload),d&&document.head.appendChild(c)}},n.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.p="/",n.gca=function(e){return e={17896441:"918","93ff49c2":"26","8c2d38e9":"47","935f2afb":"53","5f02ec4e":"143","6571d487":"169","98fd4a74":"170",a5d2c59e:"206","9ed7639c":"241","8def2dd5":"382","3b6b5936":"416",b97ac028:"433",caac22b6:"434",eff5f417:"454",eff783ef:"461","1be78505":"514",abafa56d:"558","7c85feac":"577","197156b8":"579","0879876d":"633","2b8c0123":"652","7e4ee331":"803","4052b245":"819","63925da8":"822","669e6444":"859",e1a65c49:"874",d4e72995:"899",c377a04b:"971","72939e70":"974","53c49350":"977","15afe019":"981"}[e]||e,n.p+n.u(e)},(()=>{var e={303:0,532:0};n.f.j=(t,r)=>{var a=n.o(e,t)?e[t]:void 0;if(0!==a)if(a)r.push(a[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var o=new Promise(((r,o)=>a=e[t]=[r,o]));r.push(a[2]=o);var f=n.p+n.u(t),c=new Error;n.l(f,(r=>{if(n.o(e,t)&&(0!==(a=e[t])&&(e[t]=void 0),a)){var o=r&&("load"===r.type?"missing":r.type),f=r&&r.target&&r.target.src;c.message="Loading chunk "+t+" failed.\n("+o+": "+f+")",c.name="ChunkLoadError",c.type=o,c.request=f,a[1](c)}}),"chunk-"+t,t)}},n.O.j=t=>0===e[t];var t=(t,r)=>{var a,o,f=r[0],c=r[1],d=r[2],i=0;if(f.some((t=>0!==e[t]))){for(a in c)n.o(c,a)&&(n.m[a]=c[a]);if(d)var b=d(n)}for(t&&t(r);i{"use strict";var e,t,r,a,o,f={},c={};function n(e){var t=c[e];if(void 0!==t)return t.exports;var r=c[e]={exports:{}};return f[e].call(r.exports,r,r.exports,n),r.exports}n.m=f,e=[],n.O=(t,r,a,o)=>{if(!r){var f=1/0;for(b=0;b=o)&&Object.keys(n.O).every((e=>n.O[e](r[d])))?r.splice(d--,1):(c=!1,o0&&e[b-1][2]>o;b--)e[b]=e[b-1];e[b]=[r,a,o]},n.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return 
n.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,n.t=function(e,a){if(1&a&&(e=this(e)),8&a)return e;if("object"==typeof e&&e){if(4&a&&e.__esModule)return e;if(16&a&&"function"==typeof e.then)return e}var o=Object.create(null);n.r(o);var f={};t=t||[null,r({}),r([]),r(r)];for(var c=2&a&&e;"object"==typeof c&&!~t.indexOf(c);c=r(c))Object.getOwnPropertyNames(c).forEach((t=>f[t]=()=>e[t]));return f.default=()=>e,n.d(o,f),o},n.d=(e,t)=>{for(var r in t)n.o(t,r)&&!n.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},n.f={},n.e=e=>Promise.all(Object.keys(n.f).reduce(((t,r)=>(n.f[r](e,t),t)),[])),n.u=e=>"assets/js/"+({26:"93ff49c2",47:"8c2d38e9",53:"935f2afb",143:"5f02ec4e",169:"6571d487",170:"98fd4a74",206:"a5d2c59e",241:"9ed7639c",382:"8def2dd5",416:"3b6b5936",433:"b97ac028",434:"caac22b6",454:"eff5f417",461:"eff783ef",514:"1be78505",558:"abafa56d",577:"7c85feac",579:"197156b8",633:"0879876d",652:"2b8c0123",803:"7e4ee331",819:"4052b245",822:"63925da8",859:"669e6444",874:"e1a65c49",899:"d4e72995",918:"17896441",971:"c377a04b",974:"72939e70",977:"53c49350",981:"15afe019"}[e]||e)+"."+{26:"496e3cfe",47:"4e01e2c9",53:"5095c349",143:"2c36a1d5",169:"2a82d528",170:"f1b306d4",206:"e5b529de",241:"2766f086",382:"25cc2ffb",416:"ba196860",433:"6ed771ce",434:"a20ff607",454:"359d1b94",461:"dc970584",514:"76395081",558:"58f9a41e",577:"b398bf13",579:"28c8d439",633:"a8b13e54",652:"7297d5c0",803:"0cd542b4",819:"9852c472",822:"80398a88",859:"6a0f87d4",874:"55b01fa4",899:"41131e9c",918:"7ad503a5",971:"ccfee8f6",972:"50480502",974:"15a7ef9f",977:"0dbf2de6",981:"3578b96e"}[e]+".js",n.miniCssF=e=>{},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),a={},o="over-docs:",n.l=(e,t,r,f)=>{if(a[e])a[e].push(t);else{var c,d;if(void 0!==r)for(var 
i=document.getElementsByTagName("script"),b=0;b{c.onerror=c.onload=null,clearTimeout(s);var o=a[e];if(delete a[e],c.parentNode&&c.parentNode.removeChild(c),o&&o.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:c}),12e4);c.onerror=l.bind(null,c.onerror),c.onload=l.bind(null,c.onload),d&&document.head.appendChild(c)}},n.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.p="/",n.gca=function(e){return e={17896441:"918","93ff49c2":"26","8c2d38e9":"47","935f2afb":"53","5f02ec4e":"143","6571d487":"169","98fd4a74":"170",a5d2c59e:"206","9ed7639c":"241","8def2dd5":"382","3b6b5936":"416",b97ac028:"433",caac22b6:"434",eff5f417:"454",eff783ef:"461","1be78505":"514",abafa56d:"558","7c85feac":"577","197156b8":"579","0879876d":"633","2b8c0123":"652","7e4ee331":"803","4052b245":"819","63925da8":"822","669e6444":"859",e1a65c49:"874",d4e72995:"899",c377a04b:"971","72939e70":"974","53c49350":"977","15afe019":"981"}[e]||e,n.p+n.u(e)},(()=>{var e={303:0,532:0};n.f.j=(t,r)=>{var a=n.o(e,t)?e[t]:void 0;if(0!==a)if(a)r.push(a[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var o=new Promise(((r,o)=>a=e[t]=[r,o]));r.push(a[2]=o);var f=n.p+n.u(t),c=new Error;n.l(f,(r=>{if(n.o(e,t)&&(0!==(a=e[t])&&(e[t]=void 0),a)){var o=r&&("load"===r.type?"missing":r.type),f=r&&r.target&&r.target.src;c.message="Loading chunk "+t+" failed.\n("+o+": "+f+")",c.name="ChunkLoadError",c.type=o,c.request=f,a[1](c)}}),"chunk-"+t,t)}},n.O.j=t=>0===e[t];var t=(t,r)=>{var a,o,f=r[0],c=r[1],d=r[2],i=0;if(f.some((t=>0!==e[t]))){for(a in c)n.o(c,a)&&(n.m[a]=c[a]);if(d)var b=d(n)}for(t&&t(r);i Getting Started | OverProtocol Docs - +

Getting Started

Welcome to the OverProtocol developer documentation! This guide is designed to help developers set up and prepare for building applications on OverProtocol. Before diving into coding, there are a few crucial components you need to have in place to ensure a smooth and efficient development process.

Setting Up a Node with RPC Access

To interact with the OverProtocol network, you'll need access to a node capable of handling Remote Procedure Calls (RPC). This will enable you to query and interact with the network, deploy contracts, and perform transactions programmatically.

Options for Setting Up a Node:

  • Running Your Own Node: Setting up and maintaining your own node gives you full control over network interactions. This can be done by following the setup instructions. Running your own node is beneficial for extensive development work that requires high levels of data integrity and privacy.
  • Using Public Nodes: If you prefer not to manage your own node, you can use publicly available RPC endpoints. These are provided by various services and can be accessed easily, though they might come with limitations on the rate of requests and reduced control over the node configuration.

OverProtocol Testnet Configuration

tip

When working with OverProtocol, especially in a testnet environment, it's important to note that testnet configurations and details may change at any time. This variability is typical of test environments, which are often updated or reset to test new features and improvements in the blockchain protocol.

Dolphin Testnet

The Dolphin testnet operates with the goal of providing an environment identical to that of the mainnet. Additionally, this testnet serves the role of applying and testing updates before they are implemented on the mainnet.

KeyValue
NetworkOverProtocol Dolphin
RPC URLYOUR_RPC_URL
Chain ID541762
Currency symbolOVER
Block Explorer URLhttps://dolphin.view.over.network/
SweepEpoch648000 (about 90 days)

Preparing an Account with OVER Tokens

Developing on OverProtocol typically requires interacting with the network, which can include transaction fees or testing token transactions. Therefore, it's essential to have an account loaded with OVER tokens.

Setting Up Your Developer Account:

  • Acquire OVER Tokens: If you are working on the main network, you'll need to acquire OVER tokens, which can be done through exchanges or from other token holders.
  • Testnet Tokens: For testing purposes, you can use the OverProtocol testnet, where tokens can often be acquired for free from a faucet that distributes small amounts of tokens for development use. You can receive a certain amount of OVER testnet tokens every day from OverWallet.
  • Secure Your Account: Ensure that your account is secure, especially if you are working with real tokens. Utilize hardware wallets or secure key management solutions to protect your private keys and account credentials.
- + \ No newline at end of file diff --git a/developers/build-your-contract.html b/developers/build-your-contract.html index 993f15a..4e00aa7 100644 --- a/developers/build-your-contract.html +++ b/developers/build-your-contract.html @@ -4,13 +4,13 @@ Build Your Contract | OverProtocol Docs - +

Build Your Contract

To build and deploy your decentralized application (dApp) on OverProtocol, you can use Ethereum-compatible development environments like Hardhat, Foundry or Remix. Each tool has its own setup and configuration process, but generally, you'll need to make adjustments to your project’s network configuration to connect with the OverProtocol network.

- + \ No newline at end of file diff --git a/developers/build-your-contract/deploy-your-contract.html b/developers/build-your-contract/deploy-your-contract.html index a1eed25..be7aa97 100644 --- a/developers/build-your-contract/deploy-your-contract.html +++ b/developers/build-your-contract/deploy-your-contract.html @@ -4,13 +4,13 @@ Deploy Your Contract | OverProtocol Docs - +

Deploy Your Contract

caution

While OverProtocol is EVM-compatible, there are important differences that developers should be aware of. Please refer to the documentation thoroughly before proceeding to ensure you understand these distinctions.

OverProtocol's compatibility with the Ethereum Virtual Machine (EVM) allows you to leverage various Ethereum development environments to build and deploy your smart contracts. This guide outlines how to use popular tools like Foundry, Hardhat and Remix for developing on OverProtocol. Detailed steps and tips will ensure you understand the nuances of deploying effectively in each environment.

Build With Foundry

Foundry is a fast, portable, and modular toolkit for Ethereum application development. For detailed information and further utilization of Foundry, please refer to the official documentation.

Installation

Install Foundry by following the instructions on Foundry's GitHub repository and the installation guide.

Foundry consists of:

  • Forge: Ethereum testing framework (like Truffle, Hardhat and DappTools).
  • Cast: Swiss army knife for interacting with EVM smart contracts, sending transactions and getting chain data.
  • Anvil: Local Ethereum node, akin to Ganache, Hardhat Network.
  • Chisel: Fast, utilitarian, and verbose solidity REPL.

Creating a New Project

To start a new project with Foundry, use forge init

$ forge init hello_foundry

Let's check out what forge generated for us:

$ cd hello_foundry
$ ls

The default template comes with one dependency installed: Forge Standard Library. This is the preferred testing library used for Foundry projects. Additionally, the template also comes with an empty starter contract and a simple test.

We can build the project with forge build:

$ forge build
Compiling 27 files with 0.8.19
Solc 0.8.19 finished in 1.16s
Compiler run successful!

And run the tests with forge test:

$ forge test
No files changed, compilation skipped

Ran 2 tests for test/Counter.t.sol:CounterTest
[PASS] testFuzz_SetNumber(uint256) (runs: 256, μ: 30454, ~: 31310)
[PASS] test_Increment() (gas: 31325)
Suite result: ok. 2 passed; 0 failed; 0 skipped; finished in 9.15ms (8.89ms CPU time)

Ran 1 test suite in 13.66ms (9.15ms CPU time): 2 tests passed, 0 failed, 0 skipped (2 total tests)

You’ll notice that two new directories have popped up: out and cache.

The out directory contains your contract artifact, such as the ABI, while the cache is used by forge to only recompile what is necessary.

Deploying Your Contracts

You can find out the official Foundry's documentation here.

Forge can deploy smart contracts to a given network with the forge create command.

To deploy a contract, you must provide a RPC URL (env: ETH_RPC_URL) and the private key of the account that will deploy the contract.

To deploy MyContract to a network:

$ forge create --rpc-url <your_rpc_url> --private-key <your_private_key> src/MyContract.sol:MyContract
Compiling...
No files changed, compilation skipped
Deployer: 0x079E40B71d...
Deployed to: 0x92e9a5A338...
Transaction hash: 0x2c13f01a69...

Solidity files may contain multiple contracts. :MyContract above specifies which contract to deploy from the src/MyContract.sol file.

Use the --constructor-args flag to pass arguments to the constructor:

// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

import {ERC20} from "solmate/tokens/ERC20.sol";

/// @title MyToken - minimal ERC20 used to demonstrate passing
///        `--constructor-args` to `forge create`.
/// @dev   All token logic is inherited from solmate's ERC20 base contract;
///        this contract only mints the initial supply on deployment.
contract MyToken is ERC20 {
    /// @param name          Human-readable token name.
    /// @param symbol        Token ticker symbol.
    /// @param decimals      Number of decimals the token uses.
    /// @param initialSupply Amount minted to the deployer at construction.
    constructor(
        string memory name,
        string memory symbol,
        uint8 decimals,
        uint256 initialSupply
    ) ERC20(name, symbol, decimals) {
        // The entire initial supply is credited to the deploying account.
        _mint(msg.sender, initialSupply);
    }
}

Build With Hardhat

Hardhat is a development environment for Ethereum software. It consists of different components for editing, compiling, debugging and deploying your smart contracts and dApps, all of which work together to create a complete development environment. For detailed information and further utilization of Hardhat, please refer to the official documentation.

Installation

Install Hardhat in your project by following the instructions on Hardhat's Installation Guide.

Creating a New Project

To create the sample project, run npx hardhat init in your project folder:

$ npx hardhat init
888 888 888 888 888
888 888 888 888 888
888 888 888 888 888
8888888888 8888b. 888d888 .d88888 88888b. 8888b. 888888
888 888 "88b 888P" d88" 888 888 "88b "88b 888
888 888 .d888888 888 888 888 888 888 .d888888 888
888 888 888 888 888 Y88b 888 888 888 888 888 Y88b.
888 888 "Y888888 888 "Y88888 888 888 "Y888888 "Y888

👷 Welcome to Hardhat v2.22.3 👷‍

? What do you want to do? …
❯ Create a JavaScript project
Create a TypeScript project
Create a TypeScript project (with Viem)
Create an empty hardhat.config.js
Quit

Compiling Your Contracts

Next, if you take a look in the contracts/ folder, you'll see Lock.sol:

// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.24;

// Uncomment this line to use console.log
// import "hardhat/console.sol";

/// @title Lock - Hardhat's sample time-lock contract.
/// @notice Holds the ether sent at deployment until `unlockTime`, after
///         which only the deployer (`owner`) may withdraw it.
contract Lock {
    // Unix timestamp (seconds) after which withdrawal is permitted.
    uint public unlockTime;
    // Account that deployed the contract; sole account allowed to withdraw.
    address payable public owner;

    /// @notice Emitted on a successful withdrawal.
    /// @param amount Wei transferred out (the full contract balance).
    /// @param when   Block timestamp at withdrawal time.
    event Withdrawal(uint amount, uint when);

    /// @param _unlockTime Future timestamp before which funds stay locked.
    /// @dev `payable` so the locked ether can be supplied at deployment.
    constructor(uint _unlockTime) payable {
        require(
            block.timestamp < _unlockTime,
            "Unlock time should be in the future"
        );

        unlockTime = _unlockTime;
        owner = payable(msg.sender);
    }

    /// @notice Sends the whole balance to the owner once the lock expires.
    /// @dev Reverts if called too early or by anyone other than the owner.
    function withdraw() public {
        // Uncomment this line, and the import of "hardhat/console.sol", to print a log in your terminal
        // console.log("Unlock time is %o and block timestamp is %o", unlockTime, block.timestamp);

        require(block.timestamp >= unlockTime, "You can't withdraw yet");
        require(msg.sender == owner, "You aren't the owner");

        // Event is emitted before the transfer; the transfer reverts the
        // whole transaction (event included) if it fails.
        emit Withdrawal(address(this).balance, block.timestamp);

        owner.transfer(address(this).balance);
    }
}

To compile it, simply run:

npx hardhat compile

Testing Your Contracts

Your project comes with tests that use Mocha, Chai, Ethers.js and Hardhat Ignition.

If you take a look in the test/ folder, you'll see a test file.

You can run your tests with npx hardhat test.

Deploying Your Contracts

Next, to deploy the contract we will use a Hardhat Ignition module.

Before running the module, we have to update hardhat.config.js.

// hardhat.config.js
require("@nomicfoundation/hardhat-toolbox");
require("dotenv").config();

/** @type import('hardhat/config').HardhatUserConfig */
module.exports = {
solidity: "0.8.24",
networks: {
over: {
url: OVER_RPC_URL,
accounts: [process.env.PRIVATE_KEY],
},
},
};

And run the following command to deploy your contract:

$ npx hardhat ignition deploy ignition/modules/Lock.js --network over
Deploying [ LockModule ]

Batch #1
Executing LockModule#Lock...
Batch #1
Executed LockModule#Lock

[ LockModule ] successfully deployed 🚀

Deployed Addresses

LockModule#Lock - 0x194B734f7f...

Build With Remix

Remix IDE is an open-source web and desktop application for creating and deploying Smart Contracts. For comprehensive guidance and advanced features of Remix, please refer to the official documentation.

Using Remix with OverProtocol

Access

Open Remix IDE in your web browser to begin. You can access it at https://remix.ethereum.org.

Connect

  • Configure MetaMask (or its alternatives): Ensure MetaMask or a similar compatible browser extension is installed in your browser and configured for the OverProtocol network.
  • Connect to OverProtocol: In the "Deploy & Run Transactions" plugin within Remix, select "Injected Web3" to connect Remix with the OverProtocol node through MetaMask.

Load Contracts

  • Write or Import Contracts: You can either write new smart contracts directly in the Remix editor or import existing files into the Remix environment.

Compile

  • Compile Contracts: Use Remix's Solidity compiler to compile your contracts. Make sure to select the appropriate compiler version that matches your contract's pragma statement.

Deploy

  • Deploy Contracts: Once compiled, deploy your contracts to OverProtocol by clicking on the "Deploy" button. Ensure that the correct environment (OverProtocol) and account are selected.

By following these steps, you can efficiently develop, test, and deploy smart contracts on OverProtocol, leveraging the powerful features of Remix IDE to enhance your development workflow.

- + \ No newline at end of file diff --git a/developers/build-your-contract/developer-tools.html b/developers/build-your-contract/developer-tools.html index 8b6a1b4..3fa9d55 100644 --- a/developers/build-your-contract/developer-tools.html +++ b/developers/build-your-contract/developer-tools.html @@ -4,13 +4,13 @@ Developer Tools | OverProtocol Docs - +

Developer Tools

This section offers an overview of the developer tools available for OverProtocol. Since OverProtocol is EVM-compatible, developers familiar with creating DApps on other EVM chains will find a seamless transition to building on OverProtocol.

Additionally, we are in the process of developing OverProtocol-specific, developer-friendly tools aimed at further lowering the entry barrier for application builders. Stay tuned for updates!

Smart Contract Programming Languages

Development Environments

Frontend Libraries

Wallets

- + \ No newline at end of file diff --git a/developers/client-apis.html b/developers/client-apis.html index 389c4c5..f8f25b4 100644 --- a/developers/client-apis.html +++ b/developers/client-apis.html @@ -4,7 +4,7 @@ Client APIs | OverProtocol Docs - + @@ -31,7 +31,7 @@ See eth_getFilterChanges

Example

// Request
curl -X POST --data '{"jsonrpc":"2.0","method":"eth_getLogs","params":[{"topics":["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b"]}],"id":74}'

Result see eth_getFilterChanges

ethanos_getRestorationProof

Returns the restoration proof for the account of given address.

Parameters

  1. DATA, 20 Bytes - address to get the restoration proof for.
  2. QUANTITY, integer - target sweep epoch to restore the account from.
params: ["0x079E40B71d9DffE9Fd69706F148bf85fAE824E6a", "0"]

Returns

Object - RestorationProofResult

  • proof: DATA - the proof for the restoration of the account.
  • restoredEpoch: QUANTITY, integer - the epoch coverage of the account after restoration.
  • restoredBalance: QUANTITY, integer - the restored balance of the account after restoration.

Example

// Request
curl -X POST --data '{"jsonrpc":"2.0","method":"ethanos_getRestorationProof","params":["0x079E40B71d9DffE9Fd69706F148bf85fAE824E6a", "0"],"id":1}'
// Result
{
"jsonrpc": "2.0",
"id": 1,
"result": {
"proof": "0xf91e44f90abfb90214f90211a0921b457ca5cab880d444494ab9a4d08abb1bb2996d2c5cd4e99788ce0e1e4eaba04d9d9d931d26fdb11365c3835a11bf2333919d684d2e10a4fb43e82ae7894975a03382f8bef7fd6d6c60220de06c0781c6c8c935195f530c68156941da58a3cc23a0d8ddf7800e7112f6aa7a1515a4749599fc2c4a204f2907bff696fb6d1becf4b3a046f57ae90e21076160e21426857feb46eb07feba57a432abdcce23b9f642f37aa0c33aa01a97487feb99b025e11fa1b3673ff05c42335b5036108ea413b1928c88a0248bf159ee60a7e00dd92758540b0c6e830c7ad615708fda6e6a61f892962db4a0d1303290568ee40afd4369af636946fa273747fbda5ee8fdfe0d8ce04b7dd3cca010cf90ed5f27824a00b26ca3ef4f7e77f5a2548a5eaefa0393ad6ddd9b322ed2a021412560b8c640300b4dc0be19639cf6c8667cd93971debf12492b29bb4cdf5aa0aa4db5b60220d6d9a22480ba27833ffd46a6fd4f85e6ed3d36ac1df900a90ceaa05ad9c6384078e77e3550115cfc6b71dbada27d1c6f1fe9d2ee99e0cf7142b546a08b469e837f63b47f70945077a2af4df142115427121592841dff8f990ba56298a0ea3788edd098f081cd4834e98fe3380a53a360b44a2095a6128a81c35104dde2a05582358db49fa21a739711283a514ca3ceb298ec5290e401a31c369258efb379a0de7b75a9afd2cc594bf612a938e252f52dade9fb2daed2ea96eb2d52c662b51880b90214f90211a0264dd745ce4864e9adf03f296cb29caf0276dc743b69294185226b81263a5e80a0399c13b65957c8f197582602312bf69d03f48b5bf160a8b082080d45c25efddca07694be583f9817cdbb7f137e1e5c7e1f6c79ec4b31797ac5ed7c27c8e880fa5ea065ed634cd94c91e05e16083da8c8bcca369cb1c0121e766fe473033b03a9b0dea0817e7cab8f04ea4ee085e121f6ea737b99967a2edf2107d3f7fdcbe8edd96e6da0110680eebb8af2a93f703f738f5a1a86e432016f5adefe4812a4f28c6bab0c49a0677f33471cdadf74abaa045ad2fa4a1fca4c245af738669ab830501fec563717a0fad5f230f6a0067c5cee4c22e34736f7cf5d92bd7106fff954bfa5139b916baaa052bdcbb603a300da7f86028b523bc27731b695f0b131ceed1f8135e458ffe175a07d82a78e01083e1a2536a37237ca353d575117b03319b70c7cfee8186c698770a04ba8de15982b4eb60071fbde767929c91d55d779f310452059bfcedd8db521a0a0b6b72a55db7fa17ad0fe48c7ac78cb093f2fe8f44b798ba2c5eba329ee712fe5a0f5de7e491bb7386db7f999f2548ace17b874bf4584f7796f8440df08dd681b0ba08c4fa72225ae7bc1d92c6dbcc7909f0ce3
52179e7b168d062929e1e8cd98fb47a0e7f3e4fe415f28b2c815769993dabb978f775e05b84bf02658b037e326794dafa0b5ce7ed0b01a92b33428c97e117ca0084fe1000cb1f2fafe0fed7ecf1526dab780b90214f90211a0083f0fe70dc04edb470a3bc79a25672559282e9058bb30423c39c7ff0c17a7bda061c41c924f03a5f36c4d3f7738adad0e5eb25827e91cb97b85f7d23b4619e7aea0cc5aa3da9242559e049e472042ed85ed1d185b36286fb5d992357066b5dab8caa0aebe9970d7554e30afd5658f46d661bd21f8ca865bf3ee586279405c5330d2b4a059d0a6742fc3f07ac78c6ef8fa40855dab5720d6c9017424d913ba259a72423ba0fb5e60d619acfacef80e8c15147d2267131fcf992bb1987cd506f1a5a81fb29aa0c362fa76a44a4a5560a906e063eeffd83c109257dc4447bb5be5a36d6718f434a05213a28d9d6944c377ec4d1a97fd8157eaf006e08d4ea0f33af976d884bb42a9a052f155ef1de15f89382c4b78c67773d35fce402b4c22290ae66ca60cef34ad90a01a39f508d455cfa89ef759c142c411782b3c440179a3131c838c78cbee2bfdd7a043ccbcac50c750c2fd5d126ea836a63d385192437dc9f4403b4ef49ab17b0c10a0f5ef7fac890c5a7d54368c2e12b84df261ac3ca662932d89f2ad2f3d7d4cec1da061bd793ee1af0b57cd4b967fee461a92d4f24ff28f44ce639145fc67a5fc3399a06205d72392bae0f8d42866a5bbbcda5f376a7d21a37ef68bea9e585f46242b9ca029ae9b6983ec4a9e512b4bc4d459d2f48e2125bfb12f52cc2ad706cdca503aeda0ffbc4c5431ec5101a76b8e0468e9f12ba12aa8dcf8c432b0d607fdf2593805c680b90214f90211a075f1b5a673a0c8db8e5d6f57f6655f829d97d247e0395c0ea07a53dc0f645075a0a2d1b5adc5a15eb4dc82ee9a12939211a41abd4887742bb306b84dac28799f71a05f19d6deb9bf9b412aa0e887050131f707647d5dc463f03933eb22283137e4a5a07a04d897c1363ef3233c967c801abaa4e5b794586e3f87e88357ef65afbad88aa09ac622df4309d5aa1cd072a717acd476920fab0dd5b2d2d4e1d8a4aec3070377a0c185f5fdd6f381573ec7ac2bf0b6418fc4d7d413cff3b92fe100d01521891bdda0de97b0a33079a86823819d4e3c30cd7355e4d6ff7043f5d77884ec8558b8486aa05187a9e0ab9daba24a737f5c755d6a90d47e4c4d7928f4309f393bac07ff0f58a08e6f2bf12ebfa31462530ba379e02aea153d5fbfa2207f3e4d62a24c0c069918a06095f062eb41655615a303f3b2fb7e5d88a28515a2035ba456060262c21bf9bca0536fb8b927fc177477f781d0a104e00b29b065119bec236f7e5b3a5835709488a008d9f754555653780bdb189347
c8f8ef504cbaaec8b2063b01041ab57257aecea024d5b643068e59bd77111f668a8b007bdae9b3cd82b61d1d3222959b10a344b8a0f177eac983fbe6889af354156f104f0bc8d32825a88a79d7fe18ac775f2536e5a0a733ac91849d8e1878ad2ed4211fa8ecf09499f78f035c0f1072b5b08cec6a78a034d52cbbe43a12c1d05a323982dcab99d821c3db54b0585c3d95a5507b959b2d80b90174f90171a002c3647e70a6520db9f6e4b7dce398c5f23c2b15698af6a98d60eebb020b941ba0322c43d7c0549f984d220467410ce6bdae787c9e3d5586598763038d9b90fee580a0dbe65d579423ef51795e7cf6313f1f49410e8f06e974e99f2d6c8763d683b322a08f9d971196d6db50d983733639bfd89a9358f1a89e7983683ea9a5b027944d7280a0fe33b3e0c73d66612717e3332d2e940be5cd04dedee93f915025aab197705a1b80a01e29f89fcd0a43ae9dc812f7242eb0d826f4508bf34e9b7d3a2fe8adbf90a3e5a0e4831c719a24651724b9fae42e01becb7193cf6f04f45ec24656970a8477b903a0039d88926db573c6cc7d7f6208813b883ef7e0d609324932c0ee62c4bfa36752a0745929db8b604b4fc23c1366283ef97e838294a2741163f1c60110593558f16180a01a142fcb7ef2c7048c1b935a4cd1bec522a2b7c9e6edcb40b14f3de3e4211efba0f11a17bc72bee8d21e617b7783debdff0957971d447ba5cb52e1ce4c3746fc468080b853f851808080808080808080a0abfcc7ebdc4123f5c17ffc2ddaaf8be3bc468ace346a38c4939a7edc9b883ac3808080a0a98734c98e7f915c19999354eb77df2265a76a01642a3fda7d45394789533898808080b895f8939e20c42abc6213ad44d3edec739492e93ab562033bb31355692e15cf9ff668b872f8700302896c6b9232f2a337c232a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47080f90973b90214f90211a07fb1b83e7b8b8bec9fd3ca2da7617d541293d1e53adb6b44ada80721509ca48fa0550a0cc3c8217986b5ebb8e0dbbc5d21613d49d211194264e56fc5b4f6c46087a0237c96447efedcc7a74ef2121d5e8a5bee94c641d97d314c85b9651a9bd46cfca0fe8ef2dcb93449fa6bbe8cbaf17b71030cd33d0e5dfb4d6e029a32dc2d7cefd6a0b47b31620ebd3a40fd552c53ebe2f88a77af710e86e2ed55351be02fe76874e0a0493db6b587605c2c5aa653d3180f464c8362d77d4b81d2b6e83a1867af9652f4a0959e64c078244199fa7d065a4381f0d9542ba5626d29952e6efb9c0487
72c6cfa03e8d03360e417f15d36be6286b198e9a189862b1a31a3763062f7f91b6a651b5a047f76d7b23a7b9e2f8978d9c07cc8ffde2d70eef291248dd4bb197585598eb9da09e88b12b1bdf9d6f721347c974f77c43182abc0573e654ab7b5936913375d980a0110fa3d6b558cb814983abe43c51bb93f08bf6b6d1fa6dd0e61889e03f2eeddda09810c48acbd2f7d6d59e823db3e5b507c475012628e612193ffe9987df943639a0d73e4cea2ac3d76ca756dca3b42dbfc0dcc1d67e4fad5b2f2a34393e47b0c15ba09f632659712154b46c95e0e75ccd4d6ca38007de3cea4aaf419cf8661f9b2663a074a04b9ca0cf729a56de0b68f000188f14501e5cb85bb9817d137d106a0545bfa0de2dcf51b3085cdcfc64897382e86eb47fc3a0bb334940fc92a07249e7d3f65880b90214f90211a0d3e699d84e1834a645191428d20f5e2d9b78e32a58c63327de331d26071dd0c5a07bc2822fcc419eeffb7dbe1c249f56ac8f32b83555b0371d8304a7edca8b7bc0a0106256c4c675ece2db65ad4dcc6625b13f2fa7ea34b909d234fbea6a500e1823a0238d685336619defeb66a360130f58ec4b29f9256d64742ce80af83ec91038a4a0d02b1c1e5df4a456f25768b0e6ff2f2971f0424efc652bdf87fb8ba5a22114f0a0beff76afbbb4032ed1807a5bb6f4b751302141f794488c216d79245ea5eed226a0d6b21fc0ea1eb137ab441db0d23186966dbeb57b9c78b0ec322d7d32969699a7a07232d4dda4c8ca59bf23f2a85de201bcf603dbf0f6cffe93028f1748aed111b8a05f8ec9ed56c3bacaa9b4a3c0fe9ff3e5893f41ba6dd78f7a4382635cdbba1f6aa031c97354b7c0e346155391751c5dccb71651543f0668e358468a2516a1e98334a037e451cdd7e7f8cfd062617aaf1456e4bde1b30fd1a763dfc5bd8e986a26cb2ea0665df408c794c48803ba84016fc1ef1824676a5e487952b56a164349d35b91eda06883a5063f47baedc620b2e597b7abd18b15bce7533dbbf473927d3182f779aea0c3d838e10d11e573ee7ca1a8b63e1cd63fc96b0b74b6897b057db529cb493e43a039066f40108a8a0da1ffde37db84818963c873738895d1ad5979b0889ba701ffa0abc78c031d284af5969456f7b66cc02d63fc84efc8133613448cb6f2162e985c80b90214f90211a0d00241a4573e07fea4a11122bddf7a7ee51a5592b7d4a84d49a80cb070b5ff4ea05c09f256c08de361305bd8ff8d9c0e5dc14ac3ab008cd737cc0c9c6c8526713fa02b6af3f2c477d4485748d58507341902d7bbf4b8e5d4eb5b777c15e3e097024fa0526de12787c59d85a598167975a1fe2310d38a40fa64519e17b0fa6cabef1d6aa02de6bdc027c47ff997596254c055f145e5545da05715facbf9
22fdf089aacf21a0474f9a79b26191164aeb61818d6b26129c422cbf056470d2891e096f870941f3a0cdfaa55ab4ee4727a355b543fa6b80dd56f65d51534ba487749ce3eb1d7ab45fa0a32b981517e195a4fb2afa46f88214422e0f552015bbd384610ad1c0b95229bca00b83f631058ea4c62dd2ae0b0cee41c674ebf4fbd78569390e4dc4ee86637f53a014f482563599b0fe450e290d6f59fde34e5ebac7538103f36870e0e348a9ea42a06d3ab512b91e0236a93e33416f29b5f8b34d3bed871c14de72a70c320db25907a01a4fd439ad7f29ff8bfcd5a799433f04014bd8c6e755979154ca766feee09f2fa00f71c2211c956f0508188f05ef6fc2d3ae80beaa4bdba71a97af7f0c7190f248a07bc61ea97819cdf3eb31bb8cc91099e14aa1634c9cd44e278fed897b924535baa07ff117f435cda85c4cea1b6d0e13185b9625203c6e4a14d186ff6a21bd2ef83aa00b3d1799f9c20053bc8171e084eaa210ae7d342d1a47407229fd17fcda6d294080b90214f90211a0f13038844b268dd796713bdd96d16a650dc1e1ea2a6602104063ebce3a2b517aa0d916798f1feb23dd9f4dc2e92d3e82857b2c8eb378be7461f86fbdfe9587c32da0d4b735d75342a6d1ebccad8386267c814d2793fb58dbaa335930d13b6e7557a3a005679df09478173338e360c8f4e04a7bee38558c1aa92fabde6b273d3d27d7c4a0473667e6ac695ca2c9a71c9578fcdd83012fece43b9ea3ef8b3fc0c3fb8c911ba0ba4ce95cff8419c73f9b6030107fba1d5862a54179df38b10b7461a7f4c67ddba01c7e1c3671c2e9f5f46688a430316b7bbbb1e406445dce96e57c99a717ecf5dca0da5a407f585a1777d1ef482a5b8f293c4ae4d3623ef932e4de61b2f95861b28ba0997d13ff0edf7c2dfeec67e564165ca2c3bdc8d89c2401d532bb4108327c9dfba0324fdabe08dade8a4e2a5fa6a32e4f9f3a50d54591d8e03daa89324672f229d1a06d21df4b34016c7fc5b9b1adba12026a75414dfc667966b63e6543716bd5f96aa0031cbf9182752a856efa22061d63963525b4beb51f6dce4b0fb00ca08e4a3870a0596fd393a9aed9cd8846d665da03117ad5480e162742592aebed13cb6319b353a08e820592d79e6db527c5aac497a95da8c0161117604cdb5b1395332381b15f83a07769421858f5ff961606bfad0f5f5939377459092e0edec0f21b638069e70497a0d3346e376cf2f60823f3980acf3cda2eec76528c4ec7621e9bba6f7e209b670980b90114f90111a02f5944a42ed00bfb5ec4f0721c01e105c5205ce104395f30f85cc459b98ffc6b8080a019fe52ead719dabe7dd9c8dacf890059462016b846644b3f06e2d27bc4f99c22a04a7d1d0c3e5da11099f1a0f8ebeaa63b2ff809
48387eb094dc27459efa7dd49680a02ce146b479cce800c1f61c15f667ea13860e23b5ea353ffd85c87d45ebd42a0080a027e98fe5af32f2acbbfeca38f5cee85565ac93d47153711a045b578b77c0faf5a07bd2c015d4f9a6262294cfb848198336851138a5255ec2bc3e090feea31a8f70a050fa7053fe07fc899e6eb004d54771b7b3fa71728819e13f993ae78395cdd8a9808080a0d694b288fbd13e675441821c10b1ddd7e712898df651a1340fa6dce08d29a6fe8080f90a09b90214f90211a0baa19ee5aa6419135dfc76b7d490e21ecbe6ba25a8db18095dfa279696949504a02eae7620d2e66c7a2ff0b59fe86124b4806bcfe0a74436dd2a03b04597f6f3a0a0309723fe79d6271f36743214ade12f343e552a77a3a4f1f08f80781ccf123236a0fa3d159a293aa045c1ac83db674298d0d6521c7e4bc56338de476b1d58d67bf4a0921b171f11457f96fd573e3916b63c9f5c11533db94668ce4dc3b92dc8578c69a03758b74a752274848d258060504fe8d48217a552dd5eb6850f0cdac1235bfd1ca06a1562a38b7d75b059e743442879da6f1cb184bdf7fb74b0c53c36ee7bf8124da0c384216d0760695dc438737dcf55f255ae38fa877a8b3448ffb5564c849d3edfa0e186aa0155465b29deb3dde90aaad2c6c941e26c6ab4d6e86d72f0d342e06c8da0a0d44915e1c36393df792eb7bfb5cd5510600d25e84b451eecc80ec2360b26cba02aa36ce5e855306ebb68fcab801290aa57580fcc2dff5909e8a7b84a223bd04ca0d063728265f95e1e321e739b1ebd8489c8f70cd7eb7dc4ba9defeda1caa7fb79a08108add5235add4656192dde1d73a65a6088a976ed584694eef8814f1dd23076a0c4864b5c04d86dca23a8d1c72461279d62ce262a3d50a0a2c207b723033393b6a0f045efad099a17d520aa13427d5e2cc8f91d230feccadc691d0675b1d5725912a00a809ae625958d1e9e8db4e5e3f819787e766f544ec43a2153dcc5d4fb2997c880b90214f90211a00edefe549ac781aa27af62797d1e6358feec2e3c95c66094fb61d362ed1e341ea0bb959eb3d3da46788d3ab30de4ae9d1437cebee2c548a69938c6610e4fc95979a01394ebb5191df304ff4bfa153c93f0f4839925e7e4dc5c437ab5b6164e24dd12a0cedcca5793d33bb9010a66a91ae063ede14b7b047f662e4e3c6d3a5924dff388a0c70b036294fae16219e27edc7fa98774c5103c6d3591aaac3da90dfe711a3c84a05811ea8da4786ceedcbad0a71f040068bdafdd8999552c0e91e57a7d35e53a83a0f83885b2a884b7f27957e712b779be3f4a5203503cd17794592069a4eaaaf7dca05e643afb568c4549d1657046da736c02aac868e772a1f15d402baa3ddfba6f2fa0133e012b4219
28cfeabe93386f3e62c48f88899ea0de8a890adc2716e485a942a0205d1f3b2d4239ba56ccb316201b8dc8e6d6643f66356fb9413e80e241811220a01cd1fa77858b948dff2bd31db645a934924dcfeb802cffdef8540d2512ab0663a0d792dbb3071ab745dabca5aeac43f14afbbb103d86056e8b1bab3cfe7db53e51a03e7279336428356b590982907c528d56506b29495a73f2e4939fa4b3843c37cfa0faff06c86f5878925a0c4d4dcd21d02bcdea6a9bde0e24367c8c212cf208c7e0a0cb544e9a5663d03dc83cde8802a20c78a844604db0909b9ea5acfe7964ff9868a03bbf20ecf3b5e7e7d79275554875a2aaa9b8a1d3f1ae36da4bc93f32eaa785b480b90214f90211a075517f3bb1e00ad3a541dd7e83e2ca45335a36efc983d9d48071708e3069a398a03f4b30696a3bbf1d54a6b694841c4986bcbddc98f532e567fb562e285ec83e9ca01b8474bc0cfec95493aed67722b58d550fe7b0601664211b1339068ea64ebabda02e451e9985f5b34e6814c707ad3cbb8bc36d5a765f5c28f15a0342ba94814cb4a0841bc867b83d5937fbee39117ccf2717a64dc2f93759fb3695a018ff5ba8c241a052a7f801a82a32325e3affaffd818aec14159cdef9ff071e410c963f5e134d63a036497963684413dbae7530d91d6d2c608269c759ad78bd35f9cb05159bed7c8da05c42c617a2e6c43d0d4612fb4ca773371575a6749bdf55e3c570af114e17cfe8a05a8c3fbd24a70c60f7cb859719765f5a9e357bd84a4a2ee8e79b0b8596f225a3a07029c0ffd0653ba73cc21e1e93934f399a036a385966242e4716fdc14703f9dca0a9765bbeaccf4d3942bccdb731a6a4035158af890ee6f9da3e963858b33ead4ca0789f979209f6a8aad56b6adfbb519e4b109b8ba374f1a8156600c986cbc2a65ca0f8f8a7f80ae92524d46789d7cbf74892ea08a02c95187e4985b6877f931af476a0c0091f1b5d2d90f8550f066122abe1c7daf43e71cb1908381e717cd059b30effa0b1f8ccbddc27fde6d7aeb52c4521e6dd4cb0ebc130a065a5ea104d7acadccabaa018577fbb14829b3600b4e1c6ac0342002f409853760b18ee475dff24b0f3378080b90214f90211a0082970dbd3403cac201569b5204325ebc54514713decaaeb949d527070daf288a03396fd760cb59bd3826cd034c524f1455b7839b279e6c273a4daa66a2557ab13a0f216e44d523ecb31b70591152f209582b8b6ad760c5a3b7bf8a47d12ddaffb18a0744e5df975cccaeadfabffb7308f4c74b18a51a099cda08461c60d437aa79b79a0e3d1a5662fcc8816dc29a1ddbea0cc9ea798d9d988b6c8cd097d352b3ed16bd2a06a9cc70ab74801ebca1179282453c337f1d75728680714f32347c2a40fc85136a04481
f1040c8d42e713085033bef49239813f5c54d4157368e08cba1dc411aaa8a0170823968ede745d9aea05169b9ecb9d77a6a66b40e6492961659bf868cc2189a08f908efdce0d4dc7af1bfc2a2bb2d24f48a409f0c52a36e7dd30b9613f2d8e4aa032138b76c6a7a52e53e20814f7ed388e13775fb3dab416f38b8e410876123857a0b772ae92d5d32854321fae7298febc2f469eaae18a57ac2297af07122027a715a0c3aa96fc6e76aba2139f2f06e2a897de30b41cc4d879717f527e5ddcdf3fbf00a02ad7bd6766f5a399d715e217578e6bc554a5f3e7321c57b799d8e3e41a20b045a0b76b93e0f0262bf2a361820036ff8480d3102604564622381333e0d15a79f2aaa0355fc08ac7ef89b09d00d651fdb452a86f524446e86568bac76484c19e78e6d9a0ffd559bdc905d34b75a619dabbcfeb85d6c931889014579f8d53514c368b7f8280b90114f9011180a0dd9927fad6cc976552dae5a4eb518db22a72c9753821974de342e1de7392ad43a0e85282b3464ef58425eeaf2f8a05bd9c4a102fecf31165fcea5cb85d14db5e86a0ea801f83c4f74619be72556acd921b63a4c59cc27922230ba4d5c7afbd4c830aa0a949cb1c0843ba0f20a11f572f09db2515be2af943cd9cccedb58080cbaaa532a02901d8ecff78bda787e668f0aee43da8bec183715a70bd7739f4b1135d743614a061f637f883b052bf02a52bcd996f164099b53f0fc73087c0684488d61438ea488080a013acdafb98619be1ad6d76f147e53a2f1b7095ead65cf07a5369a33b8d11f2e480808080a001e86426356515f5f3ad6bc2a2e7872ea843377ec41045a1a84eb61b8451a28c8080b894f8929e3d608554927fb5e5820e31339681312cca6a1d288435095aa9d78da49a67b871f86f80808802c68af0bb140000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47080",
"restoredEpoch": "0x0",
"restoredBalance": "0x6c6b9232f2a337c232"
}
}

Usage Example

Deploying a contract using JSON_RPC

This section includes a demonstration of how to deploy a contract using only the RPC interface. There are alternative routes to deploying contracts where this complexity is abstracted away—for example, using libraries built on top of the RPC interface such as web3.js and web3.py. These abstractions are generally easier to understand and less error-prone, but it is still helpful to understand what is happening under the hood.

// Legacy (~0.4.x) Solidity example; kept in pre-0.4.21 style — events are
// fired without the `emit` keyword — to match the bytecode compiled below.
contract Multiply7 {
    // Emitted with the computed product on every call.
    event Print(uint);

    // Multiplies the input by 7, logs the result via Print, and returns it.
    // `public` added so this listing matches the exact source piped to solc
    // later in this guide (whose compiled bytecode the deployment uses).
    function multiply(uint input) public returns (uint) {
        Print(input * 7);
        return input * 7;
    }
}

The first thing to do is make sure the HTTP RPC interface is enabled. This means we supply Geth with the --http flag on startup. In this example we use the Geth node on a private development chain. Using this approach we don't need ether on the real network.

geth --http --dev console 2>>geth.log

This will start the HTTP RPC interface on http://RPC_URL.

We can verify that the interface is running by retrieving the Coinbase address and balance using curl. Please note that data in these examples will differ on your local node. If you want to try these commands, replace the request params in the second curl request with the result returned from the first.

curl --data '{"jsonrpc":"2.0","method":"eth_coinbase", "id":1}' -H "Content-Type: application/json" RPC_URL
{"id":1,"jsonrpc":"2.0","result":["0x9b1d35635cc34752ca54713bb99d38614f63c955"]}

curl --data '{"jsonrpc":"2.0","method":"eth_getBalance", "params": ["0x9b1d35635cc34752ca54713bb99d38614f63c955", "latest"], "id":2}' -H "Content-Type: application/json" RPC_URL
{"id":2,"jsonrpc":"2.0","result":"0x1639e49bba16280000"}

Because numbers are hex encoded, the balance is returned in wei as a hex string. If we want to have the balance in ether as a number we can use web3 from the Geth console.

web3.fromWei("0x1639e49bba16280000", "ether")
// "410"

Now that there is some ether on our private development chain, we can deploy the contract. The first step is to compile the Multiply7 contract to byte code that can be sent to the EVM. To install solc, the Solidity compiler, follow the Solidity documentation. (You might want to use an older solc release to match the version of compiler used for our example.)

The next step is to compile the Multiply7 contract to byte code that can be sent to the EVM.

echo 'pragma solidity ^0.4.16; contract Multiply7 { event Print(uint); function multiply(uint input) public returns (uint) { Print(input * 7); return input * 7; } }' | solc --bin

======= <stdin>:Multiply7 =======
Binary:
6060604052341561000f57600080fd5b60eb8061001d6000396000f300606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063c6888fa1146044575b600080fd5b3415604e57600080fd5b606260048080359060200190919050506078565b6040518082815260200191505060405180910390f35b60007f24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da600783026040518082815260200191505060405180910390a16007820290509190505600a165627a7a7230582040383f19d9f65246752244189b02f56e8d0980ed44e7a56c0b200458caad20bb0029

Now that we have the compiled code we need to determine how much gas it costs to deploy it. The RPC interface has an eth_estimateGas method that will give us an estimate.

curl --data '{"jsonrpc":"2.0","method": "eth_estimateGas", "params": [{"from": "0x9b1d35635cc34752ca54713bb99d38614f63c955", "data": "0x6060604052341561000f57600080fd5b60eb8061001d6000396000f300606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063c6888fa1146044575b600080fd5b3415604e57600080fd5b606260048080359060200190919050506078565b6040518082815260200191505060405180910390f35b60007f24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da600783026040518082815260200191505060405180910390a16007820290509190505600a165627a7a7230582040383f19d9f65246752244189b02f56e8d0980ed44e7a56c0b200458caad20bb0029"}], "id": 5}' -H "Content-Type: application/json" RPC_URL
{"jsonrpc":"2.0","id":5,"result":"0x1c31e"}

And finally deploy the contract.

curl --data '{"jsonrpc":"2.0","method": "eth_sendTransaction", "params": [{"from": "0x9b1d35635cc34752ca54713bb99d38614f63c955", "gas": "0x1c31e", "data": "0x6060604052341561000f57600080fd5b60eb8061001d6000396000f300606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063c6888fa1146044575b600080fd5b3415604e57600080fd5b606260048080359060200190919050506078565b6040518082815260200191505060405180910390f35b60007f24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da600783026040518082815260200191505060405180910390a16007820290509190505600a165627a7a7230582040383f19d9f65246752244189b02f56e8d0980ed44e7a56c0b200458caad20bb0029"}], "id": 6}' -H "Content-Type: application/json" RPC_URL
{"id":6,"jsonrpc":"2.0","result":"0xe1f3095770633ab2b18081658bad475439f6a08c902d0915903bafff06e6febf"}

The transaction is accepted by the node and a transaction hash is returned. This hash can be used to track the transaction. The next step is to determine the address where our contract is deployed. Each executed transaction will create a receipt. This receipt contains various information about the transaction such as in which block the transaction was included and how much gas was used by the EVM. If a transaction creates a contract it will also contain the contract address. We can retrieve the receipt with the eth_getTransactionReceipt RPC method.

curl --data '{"jsonrpc":"2.0","method": "eth_getTransactionReceipt", "params": ["0xe1f3095770633ab2b18081658bad475439f6a08c902d0915903bafff06e6febf"], "id": 7}' -H "Content-Type: application/json" RPC_URL
{"jsonrpc":"2.0","id":7,"result":{"blockHash":"0x77b1a4f6872b9066312de3744f60020cbd8102af68b1f6512a05b7619d527a4f","blockNumber":"0x1","contractAddress":"0x4d03d617d700cf81935d7f797f4e2ae719648262","cumulativeGasUsed":"0x1c31e","from":"0x9b1d35635cc34752ca54713bb99d38614f63c955","gasUsed":"0x1c31e","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0xe1f3095770633ab2b18081658bad475439f6a08c902d0915903bafff06e6febf","transactionIndex":"0x0"}}

Our contract was created on 0x4d03d617d700cf81935d7f797f4e2ae719648262. A null result instead of a receipt means the transaction has not been included in a block yet. Wait for a moment and check if your miner is running and retry it.

Interacting with smart contracts

In this example we will be sending a transaction using eth_sendTransaction to the multiply method of the contract.

eth_sendTransaction requires several arguments, specifically from, to and data. From is the public address of our account, and to is the contract address. The data argument contains a payload that defines which method must be called and with which arguments. This is where the ABI (application binary interface) comes into play. The ABI is a JSON file that defines how to define and encode data for the EVM.

The bytes of the payload define which method in the contract is called. This is the first 4 bytes from the Keccak hash over the function name and its argument types, hex encoded. The multiply function accepts a uint which is an alias for uint256. This leaves us with:

web3.sha3("multiply(uint256)").substring(0, 10)
// "0xc6888fa1"

The next step is to encode the arguments. There is only one uint256, say, the value 6. The ABI has a section which specifies how to encode uint256 types.

int<M>: enc(X) is the big-endian two’s complement encoding of X, padded on the higher-order (left) side with 0xff for negative X and with zero bytes for positive X such that the length is a multiple of 32 bytes.

This encodes to 0000000000000000000000000000000000000000000000000000000000000006.

Combining the function selector and the encoded argument our data will be 0xc6888fa10000000000000000000000000000000000000000000000000000000000000006.

This can now be sent to the node:

curl --data '{"jsonrpc":"2.0","method": "eth_sendTransaction", "params": [{"from": "0xeb85a5557e5bdc18ee1934a89d8bb402398ee26a", "to": "0x6ff93b4b46b41c0c3c9baee01c255d3b4675963d", "data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}], "id": 8}' -H "Content-Type: application/json" RPC_URL
{"id":8,"jsonrpc":"2.0","result":"0x759cf065cbc22e9d779748dc53763854e5376eea07409e590c990eafc0869d74"}

Since a transaction was sent, a transaction hash was returned. Retrieving the receipt gives:

{
blockHash: "0xbf0a347307b8c63dd8c1d3d7cbdc0b463e6e7c9bf0a35be40393588242f01d55",
blockNumber: 268,
contractAddress: null,
cumulativeGasUsed: 22631,
gasUsed: 22631,
logs: [{
address: "0x6ff93b4b46b41c0c3c9baee01c255d3b4675963d",
blockHash: "0xbf0a347307b8c63dd8c1d3d7cbdc0b463e6e7c9bf0a35be40393588242f01d55",
blockNumber: 268,
data: "0x000000000000000000000000000000000000000000000000000000000000002a",
logIndex: 0,
topics: ["0x24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da"],
transactionHash: "0x759cf065cbc22e9d779748dc53763854e5376eea07409e590c990eafc0869d74",
transactionIndex: 0
}],
transactionHash: "0x759cf065cbc22e9d779748dc53763854e5376eea07409e590c990eafc0869d74",
transactionIndex: 0
}

The receipt contains a log. This log was generated by the EVM on transaction execution and included in the receipt. The multiply function shows that the Print event was raised with the input times 7. Since the argument for the Print event was a uint256 we can decode it according to the ABI rules which will leave us with the expected decimal 42. Apart from the data it is worth noting that topics can be used to determine which event created the log:

web3.sha3("Print(uint256)")
// "24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da"

This was just a brief introduction into some of the most common tasks, demonstrating direct usage of the JSON-RPC.

- + \ No newline at end of file diff --git a/developers/deployed-contracts.html b/developers/deployed-contracts.html index b6ccae3..5d55986 100644 --- a/developers/deployed-contracts.html +++ b/developers/deployed-contracts.html @@ -4,13 +4,13 @@ Deployed Contracts | OverProtocol Docs - +

Deployed Contracts

To avoid unnecessary redeployment and to streamline your development process, we highly recommend utilizing our already deployed and verified contracts. This approach not only saves time and resources but also ensures that you are integrating with trusted and stable contract implementations.

Here, you can access comprehensive information for each contract, including source code links and ABI (Application Binary Interface) details. Using these verified contracts allows you to quickly integrate and interact with established functionalities on the network.

Dolphin Testnet

nameaddress
PoS deposit0x000000000000000000000000000000000beac017
Palm staking0x5f840d54a42AB85c6A5bc50AE773a9E3531b171f
wrapped OVER0x2eFE8DBa880813F1d988208Ad35c184593559a9D
multicall30xC973B98AB6ffAf2289E9A8956D156f3897053aa5
- + \ No newline at end of file diff --git a/developers/differences-from-ethereum.html b/developers/differences-from-ethereum.html index d75cba8..f72855d 100644 --- a/developers/differences-from-ethereum.html +++ b/developers/differences-from-ethereum.html @@ -4,13 +4,13 @@ Differences from Ethereum | OverProtocol Docs - +

Differences from Ethereum

OverProtocol is an independent Layer 1 protocol that inherits the Ethereum Virtual Machine (EVM), ensuring compatibility with Ethereum's established ecosystem. This compatibility enables developers familiar with Ethereum to transition smoothly and leverage their existing skills. However, there are key distinctions between OverProtocol and Ethereum that developers must understand, as these differences can significantly impact how applications are built and function on this platform. Here are the crucial aspects to consider and the actions to take:

Your Accounts Can Be Expired

In OverProtocol, inactive accounts are subject to expiration. This mechanism optimizes network efficiency and scalability by reducing the overhead of maintaining dormant accounts. Account restoration involves a new transaction type and additional EVM functionalities rather than opcode-level implementation.

On the mainnet, the Ethanos cycle lasts approximately 3 months, meaning that it takes 3 to 6 months for an inactive account to expire. The assessment of activity is based on the Ethanos cycle, so if you were active near the end of a cycle, your account could remain active for one more cycle. Conversely, if you were active at the beginning, your account could last for two cycles.

Actions to Take

If Your Account is Expired:

  • Do not worry; it can be restored without any penalties. How?
  • To restore your accounts, you can send a restoration request to a restoration client.
  • Currently, you need to operate your own restoration client, but future services will provide more convenient restoration options.

If Your Account is Not Expired:

  • Ensure that accounts, especially contract accounts, are periodically used to prevent expiration. Usage is defined as any transaction that queries the contract account's state or calls its functions, including view functions.
  • Regularly monitor account activity to avoid unintentional expiration and ensure continuity of service. A monitoring tool will be available soon.
  • Especially for contract accounts with significant storage, prevent expiration as restoring storage can be costly. Future advancements will improve storage management efficiency, but for now, some monitoring and inconvenience are necessary.
info

For contract accounts, especially those with extensive storage, expiration can be costly to restore. While we are developing more efficient storage management techniques, please bear with the current monitoring requirements to prevent expiration.

You Can't Use the Same Contract Address in Ethereum

tip

While the same Externally Owned Account (EOA) address can be used across various EVM-compatible chains with the same private key, this does not apply to contract addresses.

Due to the state expiry feature in OverProtocol, all accounts, including contract accounts, could eventually expire. To mitigate the risk of an expired contract address being reused by a newly created contract, the contract creation operation always incorporates the caller account's restoredEpoch value. This inclusion alters the outcome of the CREATE operation, making the resulting addresses differ from those on other EVM chains.

As a result, even though the CREATE2 operation allows for deterministic address prediction and usage, it is not possible to reuse the same address across different chains as you would with EOA addresses.

Actions to Take

  • Be aware that contract addresses on OverProtocol will differ from those on Ethereum and other EVM-compatible chains due to the inclusion of the restoredEpoch value.
  • When deploying contracts, account for the different address derivation method and plan your deployment strategy accordingly.
// Create creates a new contract using code as deployment code.
//
// NOTE(docs): unlike upstream Ethereum, the caller's restoredEpoch
// (read via evm.StateDB.GetRestoredEpoch) is mixed into the address
// derivation, so CREATE addresses on OverProtocol differ from those
// produced on other EVM chains for the same sender and nonce.
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
// Address = f(sender, restoredEpoch, nonce) instead of the usual f(sender, nonce).
contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetRestoredEpoch(caller.Address()), evm.StateDB.GetNonce(caller.Address()))
return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr, CREATE)
}

// Create2 creates a new contract using code as deployment code.
//
// The difference between Create2 and Create is that Create2 uses
// keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
codeAndHash := &codeAndHash{code: code}
contractAddr = crypto.CreateAddress2(caller.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes())
return evm.create(caller, codeAndHash, gas, endowment, contractAddr, CREATE2)
}

Transaction has a restoredEpoch Field

In traditional blockchain architectures, the nonce primarily tracks the number of transactions sent from a given account, ensuring transaction order and preventing double-spending. However, due to the expiration feature in OverProtocol, distinguishing explicitly between expired accounts and newly created accounts becomes challenging, raising the possibility of nonce overlap. To address this issue, OverProtocol introduces the restoredEpoch as a crucial component.

RestoredEpoch

The combination of the nonce and the restoredEpoch value ensures uniqueness for each account. This system allows OverProtocol to maintain the integrity and distinction of account states over time, even through periods of account inactivity and expiration.

For a more detailed explanation, please refer to the documentation.

nonce Field in Transaction

The existing 64-bit nonce field is split in two, with the first 32 bits representing the restoredEpoch and the remaining 32 bits functioning as the traditional nonce. This adaptation allows developers to leverage existing Ethereum development environments while accommodating the unique features of OverProtocol.

Actions to Take

  • Learn how restoredEpoch functions and its interaction with the nonce to ensure each account's uniqueness.
  • Use RPC requests like eth_getTransactionCount when making transactions. The response will include the correct nonce value, considering both nonce and restoredEpoch.

Misc

SELFDESTRUCT Operation

The SELFDESTRUCT opcode, updated in accordance with EIP-6780, is implemented in such a way that while it does not actually destroy the contract account, it does process refunds. Contracts that are not used will naturally expire over time as the Ethanos epoch progresses.

The rationale behind incorporating EIP-6780 into OverProtocol differs significantly from its application in Ethereum. OverProtocol's implementation is specifically designed to avoid scenarios where a self-destructed contract account becomes indistinguishable from an Externally Owned Account (EOA). This distinction is crucial for maintaining clarity and integrity in the network's account management, ensuring that the lifecycle of contract accounts is handled in a better way.

Future Changes

As OverProtocol progresses towards the Ethanos endgame, significant changes are planned, particularly regarding how storage is managed within accounts. These adjustments will be designed to ensure that backward compatibility is maximally preserved and that a seamless migration can occur. This means that current dApp developers should not be overly concerned about the impending changes.

Storage Layout Change

Upcoming updates to OverProtocol will include a comprehensive overhaul of the storage layout within accounts. This change aims to enhance the efficiency and scalability of data management on the blockchain. Details on the new storage system will be provided as development progresses, ensuring developers have ample time to adapt their applications. This transition is intended to be smooth, with support structures in place to assist developers in migrating existing applications without disruption.

- + \ No newline at end of file diff --git a/developers/how-can-i-restore-my-account.html b/developers/how-can-i-restore-my-account.html index cd92e08..f6ad6a2 100644 --- a/developers/how-can-i-restore-my-account.html +++ b/developers/how-can-i-restore-my-account.html @@ -4,13 +4,13 @@ How can I restore my Account? | OverProtocol Docs - +

How can I restore my Account?

To restore your account, which was swept away to the nether layer, you need to send restore data to a restoration client.

Restore Data

A valid restore data should include the following fields.

  • ChainID - To prevent attacks across different networks
  • Target - Address of the target account to restore
  • SourceEpoch - The current epochCoverage of the target account. If the account does not currently exist, this field should be set to the default epochCoverage, which is the current epoch - 1.
  • TargetEpoch - Target epoch to restore the account.
  • Fee - Fee to pay the fee recipient.
  • FeeRecipient - Account that receives the restore fee, typically the owner account of the restoration client

In order to construct your restore data, you should retrieve the proper Fee and FeeRecipient by sending a http request to the restoration client you will use. Typically the restoration client will provide this through http request named something like minimumFee for fee information and feeRecipient for fee recipient. Your request will look something like this.

curl -X GET "http://hostAddress:hostPort/minimumFee"
curl -X GET "http://hostAddress:hostPort/feeRecipient"

Now, before sending this restore data, you have to sign it. There are multiple reasons for this procedure, but the most important one is so that the restoration client can’t manipulate the restoration fee. You can sign the restore data by using the SignRestoreData function in the types package. Any account, including the expired account itself, can be used to sign the restore data. Note that whoever signs the restore data — that is, the owner of the private key used to sign it — will be the one who pays the restoration fee.

After making a valid restore data and signing it, you can send the restore data through requestRestoration http post method. The request should look something like this.

curl -H 'Content-Type: application/json' \
-X POST "http://hostAddress:hostPort/requestRestoration" \
--data '{"chainId": "0x84442",
"target": "0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db",
"sourceEpoch": "0x10",
"targetEpoch": "0x5",
"fee": "0x100",
"feeRecipient": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"v": "0x26",
"r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e",
"s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663"
}'

After the restoration client validates the restore data and checks whether the fee is profitable, it will create the corresponding restoration proof and send a restoration transaction to restore your account.

We are currently working on a more user friendly interface for signing and sending restore data. Until then, please use the solution above.

- + \ No newline at end of file diff --git a/index.html b/index.html index 7c74256..4415274 100644 --- a/index.html +++ b/index.html @@ -4,13 +4,13 @@ OverProtocol | OverProtocol Docs - + - + \ No newline at end of file diff --git a/learn.html b/learn.html index 37f65ed..0c8cfae 100644 --- a/learn.html +++ b/learn.html @@ -4,13 +4,13 @@ What is OverProtocol | OverProtocol Docs - +

What is OverProtocol

OverProtocol is a brand new layer 1 with lightweight nodes empowering personal computers, enabling anyone to run a node on their PCs and become a validator. This is made possible by OverProtocol's layered architecture through Ethanos, which significantly decreases the resources required for block validation.

The vision of OverProtocol is to create a blockchain-based system with high inclusivity, allowing more participants to own and utilize value within the system.

Blockchain is a system based on a cooperation mechanism that trusts the system’s operation rather than any specific entity. It becomes safer and garners more trust from participants when a variety of stakeholders, unlikely to collude, come together. This is the fundamental reason why participation in blockchain systems needs to be open and the barriers to entry low.

The trust established in this way plays a role in safeguarding "records." The blockchain system continuously records changing states, with its most critical and simplest use case being the ledger for transaction records. By protecting these transaction records, the system can contain and utilize various forms of value.

By creating a system that anyone can participate in and use, OverProtocol will spread the experience of owning and utilizing various forms of value to more people. Ultimately, OverProtocol seeks to transform traditional methods of owning and using value, ensuring that technology serves as a bridge rather than a barrier, thus enhancing the economic empowerment of all participants.

- + \ No newline at end of file diff --git a/learn/key-features/layered-architecture/ethanos.html b/learn/key-features/layered-architecture/ethanos.html index 288b1bf..6466356 100644 --- a/learn/key-features/layered-architecture/ethanos.html +++ b/learn/key-features/layered-architecture/ethanos.html @@ -4,13 +4,13 @@ Ethanos | OverProtocol Docs - +

Ethanos

Ethanos is an effective mechanism for managing blockchain's state and history. It periodically resets the state, expiring old data and referencing previous cycles to manage a bounded state size. This approach lowers entry barriers, promotes decentralization, and fosters an inclusive blockchain system.

What is the Problem?

It is essential to address the ever-increasing data size issue in blockchain systems. The account-based blockchain system, which records the global state of accounts and balances separately from transactions, offers a simpler and more intuitive framework for developing smart contracts. These tiny Turing-complete programs execute specific tasks using account states when triggered by transactions, and their integrity is verified by every node in the blockchain.

Typically, as time progresses, the number of accounts and transactions in any blockchain grows, leading to an infinite increase in state and history data. This growth in data size results in higher memory usage, more disk operations, and significant performance burdens. Consequently, this also creates substantial barriers for new participants attempting to synchronize and engage with the blockchain system.

How Ethanos Works

Differentiating States

Ethanos segments the state into three tiers: active, staged, and inactive. Both active and staged states are maintained within the Over Layer, while inactive states are transferred to the Nether Layer.

Ethanos manages its operations through what are called sweep epochs, which are defined time cycles in the system, each composed of several blocks. At the start of each sweep epoch, Ethanos constructs a new empty state trie for the active states. It also references the entire set of the states from the previous epoch's last block, known as the "superblock", now designated as staged states for the current epoch. Both of these states are housed in the Over Layer.

During each transaction within an epoch, the current state trie is updated as follows: If a transaction involves a specific account, the system first checks the current epoch’s state trie. If the account is not found there, it then searches the previous epoch’s state trie. If located, the account details are seamlessly transferred to the current state trie. If the account is absent from both state tries, it indicates that the account has either been inactive in past epochs or is a completely new account. In both scenarios, Ethanos treats these as new accounts.

If an account from the last epoch’s trie is not involved in any transaction during the current epoch, it is then classified as inactive in the subsequent epoch and considered expired. These accounts enter a dormant status and are categorized as dormant accounts within the Nether Layer. However, being expired does not mean the account is permanently lost; a dormant account can be reactivated through restoration from the Nether Layer.

From the account's perspective, each interaction or read operation restores two life points. As each sweep epoch passes, all accounts lose one life point. If an account goes through two consecutive sweep epochs without any life point recovery, it can no longer reside in the Over Layer.

Distinguishing History

Ethanos employs the weak subjectivity point to purge data corresponding to the block body. This approach is straightforward but requires a mechanism to ensure the availability of the purged history. This aspect is still under research and development, with plans to leverage a light layer to facilitate this process.

Restoration Process

To restore a dormant account, proof is required of the last epoch in which the account was active. This is crucial to prevent attempts to recover an account to a state before assets were transferred out in subsequent epochs. The trie structure in which the state is stored can efficiently prove whether a specific account was present within a state, as long as there is a valid root value, using a Merkle proof.

Restoration involves providing both an existence proof for the state of the last active epoch's superblock and non-existence proofs for the epochs during which the account was inactive. Combining these proofs allows for the restoration of the account's state to its condition in the current epoch. This process ensures that restoration is both secure and accurate, preventing unauthorized manipulations of account histories.

Dealing with Crumb Accounts; Restored Epoch

As mentioned, Ethanos does not differentiate between expired accounts and accounts that never existed. In the current epoch, if an empty account receives funds and its value is initialized, the holder of the account's private key can begin to send transactions and engage in activities using this account. An account that was previously expired but has been reinitialized and put back into use is referred to as a "crumb account." The existence of crumb accounts adds complexity to the restoration process.

While we could have eliminated crumb accounts by requiring restoration to go back to the genesis epoch before activating any account, we chose not to adopt this approach for UX reasons. One significant issue with crumb accounts is that they undermine the purpose of the nonce, which exists to record the number of transactions an account has made, thereby preventing any transaction from being executed multiple times.

If nonces are reset to zero every time an account is initialized in each epoch, it could allow for the reuse of previously utilized nonces in transactions involving crumb accounts. This situation would make the network vulnerable to specific types of replay attacks. To mitigate such risks while maintaining the efficiency of the restoration process and the simplicity of nonce values, we decided to add a field called "restored epoch" to each account.

The "restored epoch" value for an account created in a specific epoch is set to max(0, current epoch number - 1). This signifies that the account did not exist in the state of one epoch prior, relative to the current epoch. The "restored epoch" value remains constant as long as the account remains active. For example, if an account is initialized with a "restored epoch" value of 2 in Epoch 3 and continues to be active until Epoch 9, the "restored epoch" value would still be 2. This constant value helps in tracking the initial restoration point of an account throughout its active period, providing a clear reference for any processes or checks that rely on the historical status of the account.

Restoration Process with Restored Epoch

The restoration process unfolds as follows: For the account to be restored, verification starts with a non-existence proof for the state of two epochs prior to the current one and proceeds in sequence until the last active state where an existence proof is available. A key consideration here is that the account being activated could be a crumb account. For such crumb accounts, there is no need to verify beyond the restored epoch value. Instead, proofs should be sequentially submitted up to the restored epoch minus one.

The restoration completes with the merging of the results after proofs are verified. For balances, this involves performing a sum operation, and similarly for nonces. The Restored Epoch value is determined by taking the minimum value, which indicates that the account's balance and nonce have been verified up to that particular epoch. For instance, accounts with a restored epoch value of 0 at any point signify that their balance and nonce have been consistently preserved from the genesis to the present.

Restoration can occur in parts. For example, an account that became active in epoch 6 does not necessarily need to be restored back to epoch 0. It can continue to operate with a restored epoch value of 5, thereby simplifying the restoration process and reducing unnecessary computational effort.

Specification

Sweep

A configuration known as SWEEP_EPOCH has been introduced to determine the frequency at which inactive accounts are expired. SWEEP_EPOCH defines the interval for performing sweeps, with sweeps occurring every epoch as designated by this setting.

The state trie captures the activities of each account in every epoch. At each superblock, the final block of the epoch, the current state trie is frozen and a new empty state trie is created. This frozen trie is referred to as a checkpoint trie.

In the following epochs, whenever an account's state needs updating and the account is not found in the current trie, a process is initiated to retrieve the account information from the previous checkpoint trie and integrate it into the current trie. If the account is already present in the current trie, the update is performed immediately.

Restored Epoch

When an account expires, its state values are reset to empty.

A restored_epoch field is added to each account to record the epoch during which it was last restored. This field is crucial for determining if an account has undergone restoration previously. The initial value for restored_epoch is set to max(0, current epoch number - 1).

The restored_epoch serves a function similar to that of the nonce during the restoration process by making it possible to selectively determine the point of restoration. This significantly reduces the complexity of verification as it eliminates the need to validate the state starting from the genesis block.

Furthermore, restored_epoch plays a vital role in contract creation. It helps ensure the uniqueness of contract addresses by preventing the regeneration of an address that has been previously used. This feature maintains the integrity and uniqueness of contract deployments on the blockchain.

Restoration Data

The format for restoration data is crucial for facilitating the recovery of accounts. The required data fields for initiating a recovery transaction include:

[chain_id, expire_epoch, target, target_epoch, fee, fee_recipient, signature_y_parity, signature_r, signature_s]

  • chain_id: Identifies the specific blockchain network where the recovery transaction will occur.
  • expire_epoch: Specifies the epoch limit for which this recovery data is valid.
  • target: The account address to be recovered.
  • target_epoch: The earliest epoch from which the account's data needs to be restored.
  • fee: The fee to be paid for the recovery to fee_recipient.
  • fee_recipient: The address designated to receive the fee for facilitating the recovery.
  • signature_y_parity, signature_r, signature_s: Components of the signature that authenticate the recovery request. This signature must be generated by the account responsible for paying the fee.

This data structure incentivizes data providers to provide the necessary historical data for recovery while also allowing an alternative account to cover the recovery fee. This mechanism ensures that recovery transactions are both secure and financially supported.

Restoration Transaction

For efficient recovery mechanisms within blockchain systems, it is essential to integrate recovery data within transactional frameworks. To facilitate this, we propose a new transaction type under EIP-2718 specifically designed for account restoration.

This new type extends the existing structure of EIP-1559 transactions by adding a restore_data field. The complete field structure is as follows:

[chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, destination, amount, data, access_list, restore_data, signature_y_parity, signature_r, signature_s]

Restoration Process(Pseudocode)

Restoration process is done by following steps:

  1. Collect the account's state proofs for each required epoch
  2. Construct and send a restoration transaction with the collected proofs
  3. Upon receiving the restoration transaction:
    • For each proofs of epoch:
      1. Verify the proof
      2. Get the state of the epoch.
      3. Apply the state
def restore_account(account, proofs):
restored_epoch = account.restored_epoch

for proof in proofs:
root_hash = get_last_checkpoint_block_by_epoch(restored_epoch).state_root

if is_accurate_merkle_proof(root_hash, account, proof): # Proof is non-void proof
restored_account = extract_account(merkle_proof)
account.restored_epoch = restored_account.restored_epoch
account.balance += restored_account.balance
account.nonce += restored_account.nonce
elif is_accurate_void_proof(root_hash, account, proof): # Proof is void proof
restored_epoch -= 1
else: # Proof is invalid
raise Exception("Inaccurate proof")

Restoration Cost Breakdown

The recovery process entails several operations such as reading or verifying data and performing decoding tasks. Each task contributes to the total cost, which is determined by the number of epochs involved in the recovery and the amount of data processed.

Here is a breakdown of the costs associated with different operations during the recovery process:

| Operation | Gas |
| --- | --- |
| read restoredEpoch | 20 |
| read nonce | 20 |
| read balance | 20 |
| Keccak256 | 100 |
| Ecrecover | 3000 |
| CallValueTransfer | 9000 |
| CallNewAccount | 25000 |
| read header | 800 per epoch |
| RLP decoding | 1 per word |
| verifyProof | 100 per epoch, 2 per word |

Cost Per Epoch

For each epoch involved in the recovery process, the incurred costs include a verifyProof operation and a read header operation, each contributing to an approximate total of 900 gas per epoch. If the proof is an existence proof, an additional RLP decoding operation is also necessary.

Variable costs are determined by the length of the input data, involving one RLP decoding and one verifyProof operation, both of which scale with the size of the input. These contribute an additional cost of 3 gas per word.

The formula for calculating the total recovery cost is structured as follows:

$\text{Total Restoration Cost} = 37000 + 900\times\text{Epoch} + 3\times\text{words} + \text{Memory Cost}$

Here, 37000 gas covers the initial operations such as account creation and transaction verification, 900 gas for each epoch reflects the fixed costs per epoch, 3 gas per word accounts for variable decoding and proof verification costs, and additional memory costs.

The Endgame

Future Directions for Contract Restoration

In Ethanos, state expiration occurs on an account-by-account basis. However, dealing with contracts poses a challenge due to the storage managed under contract accounts.

In the EVM, the storage owned by contracts can grow indefinitely in size. Handling unlimited data sizes within transaction forms to restore contracts is not feasible; it would potentially flood and paralyze the peer-to-peer network. Therefore, currently, OverProtocol has disabled the restoration functionality for contracts. Contracts must be managed in a way that prevents them from expiring.

In the ultimate vision of Ethanos, overcoming these limitations requires moving away from the traditional storage layout of the EVM. It becomes necessary to bound the size of data that accounts can own, and allow for contract-managed storages to be partially expired and restored.

In due time, a process of state and storage migration for all contracts will be implemented. This migration will occur between sweep epochs, facilitating a transition that maintains system integrity while accommodating the expansive nature of contract storage.

- + \ No newline at end of file diff --git a/learn/key-features/layered-architecture/overview.html b/learn/key-features/layered-architecture/overview.html index 7975125..d227ac4 100644 --- a/learn/key-features/layered-architecture/overview.html +++ b/learn/key-features/layered-architecture/overview.html @@ -4,13 +4,13 @@ Layered Architecture | OverProtocol Docs - +

Layered Architecture

OverProtocol implements a layered approach to managing blockchain data, segmenting it into more manageable and essential components. The system differentiates between active and inactive states. Active states consist of accounts that are either frequently accessed or have been accessed recently. In contrast, inactive states encompass accounts that are less frequently accessed or have not been recently used. History is similarly bifurcated into recent and older segments. Data from newer blocks are classified as recent history, whereas data from all preceding blocks falls into the category of older history.

The essential data that nodes require to process and follow blockchain records includes active states, recent history, and block header information. This subset of data is referred to as the Over Layer. Conversely, the inactive state and older history data are grouped into what is known as the Nether Layer.

The Ethanos algorithm within OverProtocol establishes the criteria for distinguishing between the Over Layer and the Nether Layer. It also provides a mechanism for restoring data from the Nether Layer back to the Over Layer. By utilizing Ethanos, OverProtocol enforces its layered architecture, limiting the size of the data in the Over Layer. This restriction ensures a bounded, manageable size for the blockchain system, enabling sustainable and scalable participation.

- + \ No newline at end of file diff --git a/learn/key-features/over-pos/overview.html b/learn/key-features/over-pos/overview.html index fbe2ae6..e44543f 100644 --- a/learn/key-features/over-pos/overview.html +++ b/learn/key-features/over-pos/overview.html @@ -4,13 +4,13 @@ Over PoS Overview | OverProtocol Docs - +

Over PoS Overview

Consensus is important

In the world of blockchain, consensus algorithms are like the referees of a match, ensuring everyone's playing by the same rules. They keep every ledger in the network in sync, validating transactions, and maintaining a decentralized, tamper-proof system that instills trust among participants. There are various kinds of these algorithms, such as Proof-of-Work (PoW) which tasks miners with solving complicated math puzzles, and Proof-of-Stake (PoS) which selects validators based on their token holdings.

For OverProtocol, PoS sits at the heart of our operations. Participants prepare a substantial amount of OVER tokens from the market and put them up as collateral to create and validate blocks. If they perform their role successfully, they are rewarded with OVER. However, any malicious activity can lead to penalties - anything from suspension to a complete loss of staked OVER tokens. So, playing fair is not just encouraged, it's mandatory.

Why PoS

While there's a broad array of PoS algorithms available, we chose to align with Ethereum's Gasper for OverProtocol. Our mission is to build a blockchain that doesn't disproportionately favor a select few, and we wanted our choice of consensus algorithm to reflect that.

Many new blockchains are leaning towards a Delegated Proof of Stake (DPoS) format, where a small group of validators are selected (such as Cosmos BFT and Aptos etc). But this can cause performance issues if these validators don't meet high standards. They are expected to manage a robust node operation environment to ensure the speed and performance of the blockchain consensus, making it a tall order for the average person due to the need for advanced infrastructure and significant capital.

Contrastingly, Ethereum's Gasper allows for a larger pool of validators and is more accommodating to those with less sophisticated node operation environments. Aligned with Over's philosophy and vision for blockchain, we've adopted a slightly tweaked version of Gasper. This move ensures a more inclusive consensus process, making participation in the blockchain more accessible to everyone, regardless of their resources.

In reality, Ethereum's staking has shown a trend towards centralization, with close to 56% of the staked amount held by the top four validators. This concentration goes against the core goal of decentralization, posing a significant roadblock. We believe the root cause lies in the high hardware requirements. Although the consensus protocol theoretically supports millions of validators, the practical requirements for running a node continue to be a formidable barrier.

OverProtocol aims to tackle this issue head-on by harnessing the power of lightweight client techniques. These techniques significantly trim down the resource requirements, making it possible for anyone with a basic PC to run a node and step into the role of a validator. By integrating these techniques with Gasper, Over brings the concept of home staking to life. Consequently, anyone can now contribute to the network's security and stability, regardless of their resources.

- + \ No newline at end of file diff --git a/learn/key-features/over-pos/requirements.html b/learn/key-features/over-pos/requirements.html index fbb26a6..e1a4101 100644 --- a/learn/key-features/over-pos/requirements.html +++ b/learn/key-features/over-pos/requirements.html @@ -4,14 +4,14 @@ Requirements | OverProtocol Docs - +

Requirements

The requirements for becoming and maintaining the status of a validator within the OverProtocol are set to balance the need for active participation and accessibility. These requirements ensure a stable and robust network while keeping participation as accessible as possible. By aligning the interests of validators with the network's stability and offering a carefully calibrated set of rules, OverProtocol aims to foster a thriving ecosystem where validators play an essential role. Below is an explanation of the primary requirements:

Stake Minimum Amount of OVER to be a validator

The minimum stake requirement of OVER ensures that validators have something significant at risk, motivating them to actively participate in the consensus. This helps stabilize the value of the coin and aids swift consensus. However, OverProtocol recognizes the need for accessibility and wants to avoid creating unnecessary barriers. For now, 256 OVER is selected as a staking requirement, considering the need to include as many participants as possible without compromising the system's integrity.

Maintain at Least 70% Uptime

Every validator must maintain an uptime of at least 70%. This criterion is crucial for the system's stability, as the consensus process depends on both the number of validators and each one's average uptime. Importantly, our mathematical modeling is conducted under the assumption that the system is operated solely by ordinary individuals, not a select few with specialized operational expertise (e.g., blockchain infrastructure providers). The calculations demonstrate that a 70% uptime from an ordinary validator guarantees system safety, assuming that more than 16,384 validators are involved (Ethereum's original design aimed to attract over 16,384 validators to ensure a smooth transition to the Proof-of-Stake (PoS) system).

In our system, we have instituted an evaluation scheme termed risk score to assess each validator's uptime. If a validator does not meet the uptime benchmark, its risk score escalates. Once the score exceeds a specific threshold, indicating a significant risk of that validator's participation in consensus, the validator is removed from the validation network. This strategy guarantees that validators remain dedicated to preserving their online presence, thereby positively influencing the network's reliability. For a comprehensive understanding, consult the Rewards and Penalties.

- + \ No newline at end of file diff --git a/learn/key-features/over-pos/rewards-and-penalties.html b/learn/key-features/over-pos/rewards-and-penalties.html index 0d65efb..235b44f 100644 --- a/learn/key-features/over-pos/rewards-and-penalties.html +++ b/learn/key-features/over-pos/rewards-and-penalties.html @@ -4,13 +4,13 @@ Rewards and Penalties | OverProtocol Docs - +

Rewards and Penalties

The reward and penalty system serves as a mechanism to steer the blockchain network towards enhanced security. Rewards should be designed to encourage honest and diligent participants to continue contributing to the network. On the other hand, penalties should be crafted to deter or swiftly remove participants who might harm the network. However, care should be taken to ensure that excessively stringent penalties do not deter participation by creating psychological barriers.

Gasper's reward and penalty structure is delicately parameterized considering these factors. Validators, when given an opportunity to create a block, receive a relatively large reward. However, over extended periods of active validation, the rewards from participating in attestations in every epoch become more significant. In essence, the system rewards validators more as they remain diligently active. Conversely, penalties for not participating are balanced with potential rewards, ensuring that temporary downtimes, like short node outages, are not overly punitive.

However, direct threats to the consensus activate a stringent rule known as slashing, invoking strong penalties. Slashing comes into play in situations such as 1) when a proposer broadcasts conflicting blocks or 2) when an attester makes contradictory votes or engages in double-voting. Validators who witness these violations become whistleblowers, presenting evidence to the network. Violators face severe asset forfeiture and lose their validating rights.

Introducing Bailout - Rescuing Offline Validators

In OverProtocol, this foundational reward and penalty structure is augmented with a Bailout (rescuing offline validators) mechanism. The rescue mechanism swiftly removes validators not maintaining adequate uptime from the consensus. Validators are rescued out of the network if their risk score surpasses a set threshold, which increases during prolonged downtime and decreases upon uptime. The system thereby monitors and rescues those who consistently fail to maintain adequate uptime.

There are two primary reasons for implementing this risk score:

Reason 1. Safeguarding the Validator's Balance

In any consensus-driven blockchain system, validators pledge a certain amount of assets as collateral. This ensures their vested interest in the proper functioning and security of the network. When validators are inactive or misbehave, they are penalized, causing a deduction from this pledged balance. Over time, if a validator remains inactive, these penalties can accumulate, significantly eroding their collateral.

The rescue mechanism acts as a protective measure in such scenarios. By detecting and ejecting consistently inactive validators, the system prevents their balance from being drained excessively. It's analogous to a safety net, ensuring that validators do not incur irreversible financial damage due to prolonged inactivity, which might sometimes be beyond their control, such as technical glitches or unforeseen disruptions.

Moreover, this mechanism protects not just the individual validators but also the overall network's financial incentives. If validators see their peers losing vast amounts of collateral due to extended downtimes, it could deter potential validators from joining or continuing in the network, fearing substantial losses.

Reason 2. Ensuring System Security and Optimal Performance

A blockchain network thrives on the active participation of its validators. They are responsible for proposing, verifying, and finalizing transactions or blocks in the chain. Inactive validators can slow down this process. Moreover, a significant number of inactive validators can make the network more susceptible to attacks and reduce its overall security.

The rescue mechanism identifies and removes these inactive participants, ensuring that only active, reliable validators contribute to the consensus process. By doing so, it keeps the network's performance optimized and maintains its security standards.

How the Bailout Works

In essence, the rescue mechanism is both a protective and proactive feature, maintaining the financial health of validators and the operational integrity of the network. The risk score is designed to incentivize validators to maintain an uptime higher than 67%, assuming there are at least 16,384 validators in the network. At the network’s initial stage when there are fewer validators, the required uptime hurdle is set to 70%. As the number of validators increases, this hurdle gradually decreases to 67% uptime.

This design takes into account that a higher number of validators can inherently improve the system’s resilience from a statistical perspective, thereby allowing the validator uptime hurdle to be lowered. But, regardless of the number of validators, a validator will face an increase in risk score during downtime and a decrease of the score during uptime.

About the Validator Risk Score

This part depicts the formula and illustration of the validator risk score. First, we define the following.

$N$: number of validators

$P$: validator uptime $(0 \leq P \leq 1)$

$\Delta S_{max}$: maximum risk score increment at one epoch $= 1$

$V_{min}$: minimum number of validators required $= 16384$.

A validator's risk score is increased by $a$ during downtime, and decreased by $b$ during uptime.

Then the expected $\text{risk score per epoch} = a(1-p) - bp = -(a+b)p + a$, and the x-intercept of this function would be $\frac{a}{a+b}$. We know that the value $a$ is $\Delta S_{max}$, hence 1. So the x-intercept is $\frac{1}{1+b}$. We target the x-intercept to be the validator uptime threshold. That is, if we denote $f$ as the step function which indicates the required uptime depending on the number of validators (illustrated in the figure below), then $\frac{1}{1+b} = f$.

Required Validator Uptime

Then the relationship between the validator's uptime and risk score delta per epoch is shown in the following illustration and equation.

Delta Risk Score per epoch
$\Delta\text{Risk Score per Epoch} = -\frac{\Delta S_{max}}{f}\cdot P + \Delta S_{max}$

When there are a small number of validators participating in the system, you are required to maintain a relatively high uptime. That is, given $N = V_{min}$, the risk score increases when $P \leq 0.70$. The figure's blue-colored line has an x-intercept at $P = 0.7$. As more validators participate in the system, the required uptime hurdle lowers. That is, when $N = \infty$, the risk score increases when $P \leq \frac{2}{3}$. The figure's orange-colored line has an x-intercept near $P = \frac{2}{3}$.

- + \ No newline at end of file diff --git a/learn/key-features/tokenomics/distribution.html b/learn/key-features/tokenomics/distribution.html index ab35987..dde07ab 100644 --- a/learn/key-features/tokenomics/distribution.html +++ b/learn/key-features/tokenomics/distribution.html @@ -4,13 +4,15 @@ Token Distribution | OverProtocol Docs - +
-

Token Distribution

info

Disclaimer: The tokenomics details, including the distribution and allocation mechanisms, are still under development and may be adjusted before the mainnet launch to better serve the evolving needs of the OverProtocol ecosystem.

OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued.

Token Allocation

1. Staking Rewards

30% of the total tokens, equating to 300 million OVER, will be distributed over 10 years. The issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. The remaining 100 million OVER will be used as an adjustable reward, modulated in real-time by the system without human intervention, based on the desired staking quantity. Further details are described below.

2. DAO Treasury

The DAO Treasury is a pivotal component in the sustainable growth and development of the OverProtocol ecosystem, with the goal of supporting DeFi, Layer 2 solutions, and other ecosystem initiatives. It will introduce governance mechanisms allowing participation from node validators, Nethers stakers, and potentially other contributors in deciding the allocation of treasury funds. The treasury will be funded by a linear distribution of 10% of the total supply (100 million OVER) over ten years, along with the base fee from transaction fees being allocated to the treasury.

3. Over Community Access Program(OCAP)

Of the total supply, 15% is initially allocated to the OCAP. OCAP facilitates the distribution of OVER in various ways, such as airdrops for early community members and contributors, or through liquidity provision. The goal is to make participation in OverProtocol accessible to those who share our vision.

4. Others

The remaining 450 million OVER is earmarked for distribution to 4 entities (Core Contributors, Investors, Over Technologies, and Over Foundation) over a 4-year schedule. Refer to the table below for the yearly allocation amounts. Each percentage point indicates the proportion of allocation distribution relative to the total 1 billion OVER.

alloc_chart

Staking Rewards

Minimum Guaranteed Rewards

OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch.

Adjustable Rewards

The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. These adjustments are made based on the current staking rate, ranging between a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. Refer to this page for a comprehensive overview of the feedback mechanism.

After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers.

| Year | Minimum Issuance | Maximum Issuance |
| --- | --- | --- |
| Year 1 ~ 10 | 20M OVER | 30M OVER |
| Year 11 ~ | 0 OVER | 10M OVER |
- +

Token Distribution

info

Disclaimer: The tokenomics details, including the distribution and allocation mechanisms, are still under development and may be adjusted before the mainnet launch to better serve the evolving needs of the OverProtocol ecosystem.

OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued.

Token Allocation

1. Staking Rewards

30% of the total tokens, equating to 300 million OVER, will be distributed over 10 years. The issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. The remaining 100 million OVER will be used as an adjustable reward, modulated in real-time by the system without human intervention, based on the desired staking quantity. Further details are described below.

2. DAO Treasury

The DAO Treasury is a pivotal component in the sustainable growth and development of the OverProtocol ecosystem, with the goal of supporting DeFi, Layer 2 solutions, and other ecosystem initiatives. It will introduce governance mechanisms allowing participation from node validators, Nethers stakers, and potentially other contributors in deciding the allocation of treasury funds. The treasury will be funded by a linear distribution of 10% of the total supply (100 million OVER) over ten years, along with the base fee from transaction fees being allocated to the treasury.

3. Over Community Access Program(OCAP)

Of the total supply, 15% is initially allocated to the OCAP. OCAP facilitates the distribution of OVER in various ways, such as airdrops for early community members and contributors, or through liquidity provision. The goal is to make participation in OverProtocol accessible to those who share our vision.

4. Others

The remaining 450 million OVER tokens are earmarked for distribution to four entities: Core Contributors, Investors, Over Technologies, and the Over Foundation. The distribution will follow a 2-year schedule, which includes a 6-month cliff and 18 months of linear vesting.

alloc_chart

Staking Rewards

Minimum Guaranteed Rewards

OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch.

Adjustable Rewards

The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. These adjustments are made based on the current staking rate, ranging between a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. Refer to this page for a comprehensive overview of the feedback mechanism.

After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers.

| Year | Minimum Issuance | Maximum Issuance |
| --- | --- | --- |
| Year 1 ~ 10 | 20M OVER | 30M OVER |
| Year 11 ~ | 0 OVER | 10M OVER |
+ \ No newline at end of file diff --git a/learn/key-features/tokenomics/fee.html b/learn/key-features/tokenomics/fee.html index e4e56b9..f0884f3 100644 --- a/learn/key-features/tokenomics/fee.html +++ b/learn/key-features/tokenomics/fee.html @@ -4,13 +4,13 @@ Fees | OverProtocol Docs - +

Fees

Currently Effective

Transaction fee

The Transaction Fee is a charge applied to each transaction within the OverProtocol's on-chain activity. This fee serves to reduce the total circulating supply of OVER tokens.

There are two primary objectives that we aim to achieve through the transaction fee design. Firstly, we seek to align user gas usage with an appropriate gas target, ensuring efficient network operation. Secondly, we aim to induce deflationary pressure through the application of base fees, thereby promoting a balanced economic environment within the network. For this purpose, we use the commonly known EIP-1559, and adjust its design which we plan to achieve through several future updates.

In the protocol's initial stages, the base fee is collected and directed to the DAO Treasury, supporting various ecosystem development initiatives. As the protocol matures, the collection strategy evolves: instead of accruing in the treasury, the base fee is directly burned from each transaction. This nuanced approach balances the initial growth needs with a longer-term strategy of reducing token supply, thereby sustaining the protocol’s economic health.

Future Plans

Storage Rent Fee

The Storage rent fee is a charge on the contract accounts levied every certain period. It charges the use of storing data on the blockchain and reduces the total circulating supply of the OVER tokens.

Storage rent is a proposed economic mechanism designed to address the inefficiency of the 'pay once, use forever' model for state storage. In traditional blockchain models, once a user pays a fee to store data or execute a transaction, the associated data remains on the blockchain indefinitely, leading to an ever-growing state. This growth poses significant scalability and efficiency challenges.

With the storage rent fee, a blockchain storage user would consistently pay the rent to compensate for the ongoing use of the storage space. This fee incentivizes users to only retain necessary and actively used data, thereby managing the size and efficiency of the blockchain's state. That is, we can expect users to be more judicious about the data they store on the blockchain and to potentially clean up or remove data that is no longer needed.

Such a fee is levied on every Ethanos epoch, and the amount depends on the quantity of data stored with the duration for which it was stored. The storage fee is collected and directed to the DAO Treasury, supporting various ecosystem development initiatives.

Why it was hard to collect Storage Rent Fees

Implementing a storage rent fee design in conventional blockchain architectures is challenging due to the immense size of the state. For the protocol to levy storage rent, it must navigate through all state accounts to determine the appropriate charges and identify the account holders responsible for these fees. Additionally, the protocol needs to decide on the timing for such traversals. This process, under typical blockchain designs, presents significant complexities and operational inefficiencies, making the implementation of a storage rent fee system difficult. Consequently, in many cases, once a fee is paid for storing new values in the state, the space is occupied indefinitely, bypassing ongoing storage costs.

OverProtocol's Approach

Through its innovative Ethanos technique, the OverProtocol effectively manages state size and introduces periodic intervals, streamlining the process of imposing storage rent fees. This approach allows for a straightforward determination of when and which contract accounts should be charged. The whitepaper, OverProtocol: Towards True Decentralization, elaborates on Ethanos, but here we present its core principles.

OverProtocol distinguishes between active and inactive accounts by resetting states at regular intervals, leveraging the consistency of activity across these cycles. Active accounts, identified by their continuous operation through cycles, are seamlessly transferred from the finalized state of the previous cycle to the current cycle's state. This transfer occurs at the first transactional interaction in the new cycle.

At this juncture, storage rent is levied on contract accounts, employing the efficiency of the Ethanos technique without necessitating external state traversal. This efficiency is further enhanced by the protocol's managed state size. Additionally, accounts in OverProtocol are equipped with metadata that assesses their storage size, creating a system where larger storage spaces incur higher rent. This design facilitates a fair and usage-based charging model.

The storage rent design is still under development, with the goal of establishing a user-friendly framework that simultaneously fosters a robust storage economy.

- + \ No newline at end of file diff --git a/learn/key-features/tokenomics/feedback.html b/learn/key-features/tokenomics/feedback.html index dae6dfa..7debc05 100644 --- a/learn/key-features/tokenomics/feedback.html +++ b/learn/key-features/tokenomics/feedback.html @@ -4,13 +4,13 @@ Deposit and Yield | OverProtocol Docs - +

Deposit and Yield

OverProtocol employs a proof-of-stake mechanism, requiring validators to deposit OVER tokens to participate in the network's block creation process. The yield is the key to attracting the deposit, and is the reward given to the validators. Let's delve into the role and significance of this deposit and yield in a PoS blockchain, focusing on OverProtocol's system design.

Deposit

The deposit in OverProtocol serves as an economic safeguard, deterring actions that could undermine the blockchain's integrity. To gain significant control over the chain, an attacker would need to acquire more than two-thirds of the total deposited tokens. Additionally, owning a third or more of these tokens could disrupt the consensus algorithm's finalization process. Therefore, the network's economic security is strengthened by increasing both the number of participants and the volume of deposited tokens.

While a higher token deposit undoubtedly enhances the chain's safety, the utility of OVER tokens goes beyond security. These tokens play a crucial role in the network, including paying network fees, acting as intermediaries in exchanges, and supporting liquidity in the OverProtocol's economic activities. Consequently, an excessively high deposit requirement could impede the ecosystem's growth and dynamism by limiting the availability of tokens for these essential functions.

Therefore, it is crucial to maintain an optimal deposit amount for the OverProtocol. This balance ensures that the deposit is not so low as to compromise the network's security, nor so high as to diminish the monetary value and utility of the OVER token. Striking this balance is key to preserving both the integrity of the blockchain and the dynamic functionality of the token within the ecosystem.

Target Deposit Ratio

The target deposit ratio is defined as the desired proportion of staked OVER tokens relative to the total circulating supply, as illustrated in the figure below:

Target Deposit Ratio

Initially, the OverProtocol sets a high target deposit ratio. This approach is adopted because, at the outset, the mainnet token often has a low market price, undermining its ability to secure the chain. A higher target deposit ratio compensates for this low value, ensuring adequate security.

However, reducing the deposit ratio can also be beneficial. The tokens not staked are crucial for on-chain activities, enhancing the monetary value of the OVER token. Therefore, once the deposit level is sufficient to assure security, it is advantageous to lower the target deposit ratio.

As the chain matures and expands its utility, gaining monetary value, a lower target deposit ratio becomes adequate for maintaining security. This gradual adjustment in the target deposit ratio is strategic, aiming to strike a harmonious balance between encouraging broad participation and efficiently managing the network's operational demands.

Target Deposit Amount

The target deposit amount represents the target aggregate quantity of OVER tokens staked within the system, as depicted in the picture below:

Target Deposit

This figure is crucial as it indicates the volume of tokens committed to securing the Proof of Stake (PoS) system. Therefore, setting an appropriate target deposit amount is a key strategic decision.

In the early stages, to align with the high target deposit ratio, it is essential to rapidly increase the total amount of tokens staked in the system. As the system evolves and stabilizes, the necessity to accumulate large new deposits diminishes. Eventually, a saturation point is reached where the accumulated deposits are sufficient to ensure system security. Following this, similar to the rationale behind reducing the target deposit ratio, the target deposit amount is capped. This cap, the max target deposit, is implemented to enhance the monetary utility of the non-staked OVER tokens, thereby supporting broader economic activities within the ecosystem.

Yield

Yield is the interest rate that measures the proportion of newly issued staking rewards given to the stakers in comparison to their original stake. Validators, responsible for executing assigned duties, earn these rewards. When the reward is minimal, approaching zero, it discourages contributions to the network, while a higher reward increases participation demand. This concept of yield plays a crucial role in maintaining a specific deposit ratio.

Base Yield

OverProtocol establishes a predetermined base yield as the foundational yield rate for staking. This base yield, applied to the maximum target deposit amount, determines the total allocation of OVER tokens as rewards for each epoch, thus forming the reward pool. In this system, the reward pool amount remains constant for each epoch, regardless of the time period. Consequently, the actual yield for each deposit is influenced by the total amount of stake deposits.

In the early stages, when a smaller amount of deposits shares the fixed reward pool, the reward distributed per token deposit is high, leading to a higher yield. However, as the total deposit volume increases, the yield per deposit decreases proportionately. Ultimately, when the deposit amount reaches the maximum target, the yield stabilizes, aligning with the predetermined base yield level.

The Feedback Mechanism

The base yield and the reward pool establish the foundational yield for validators at each specific moment within our protocol. However, the yield is dynamically adjusted from this baseline to assist the system in reaching the target deposit amount. If the actual deposit is below the target, the yield is increased; conversely, if it exceeds the target, the yield is decreased. This crucial adjustment process is known as the feedback mechanism.

The Need for Feedback Mechanism

The feedback mechanism is essential in managing fluctuations in yield demand, particularly when the actual deposit deviates from our target deposit assumptions. For example, consider a scenario where the actual demand for yields is lower than expected, resulting in a deposit ratio below our projections, as depicted in the picture below. The converse situation is also plausible:

Discrepancy

Such discrepancies arise from changes in yield demand. A notable instance occurs when the US Federal Reserve raises interest rates, increasing the opportunity cost of staking in OverProtocol. Under these circumstances, the same capital might yield higher returns in alternative financial instruments. Therefore, even if OverProtocol maintains a consistent yield level, validators might prefer investing in other assets, leading to a decrease in deposits within OverProtocol. This situation illustrates how actual yield demand can diverge from our initial assumptions.

Irrespective of external factors, maintaining a specific deposit ratio is crucial for the security of OverProtocol. To ensure this, we have implemented a feedback mechanism that dynamically adjusts our yields. Modifying the yield either upwards or downwards serves as an incentive or disincentive for participation, thereby influencing deposit levels.

Feedback Mechanism

The feedback mechanism functions by assessing the discrepancy between the target and actual deposit ratios, subsequently fine-tuning the yields. The *Validator Pending Queue* comprises validators attempting to enter the system, while the *Validator Exit Queue* includes those trying to exit. The net demand difference, derived from the size disparity between these two queues, indicates the overall interest in becoming an OVER validator. By adding the original number of validators for the current epoch, we can calculate the number of validators for the next epoch as follows:

\begin{align} \text{Validator}_{\text{next}} = \text{Validator}_{\text{current}} + \text{Pending Queue}_{\text{size}} - \text{Exit Queue}_{\text{size}} \end{align}

If the total deposit amount, inferred from $\text{Validator}_{\text{next}}$, exceeds the target deposit amount for that timeframe, the yield decreases, and vice versa. To safeguard against attacks and prevent excessively high or low yield levels, we propose implementing upper and lower yield bounds. Additionally, the speed of feedback adjustment is a critical aspect. Denoting the feedback adjustment as $f(t)$, its speed is defined as:

\begin{align} \frac{df(t)}{dt} = k \cdot \text{Maturity Factor} \cdot \text{Scaling Factor} \end{align}

The *Maturity Factor* is introduced to steer the system towards a more stable and mature state. The system evaluates the current deposit level against the maximum target deposit amount, increasing the rate of feedback change proportionally to the discrepancy. In essence, the farther the system is from the maximum target, the faster the adjustments occur. The *Scaling Factor* enables the system to rapidly align with the target, where a larger discrepancy between the actual and target deposit amounts accelerates the feedback change rate.

To implement such a feedback mechanism, the system requires two key components: the *Adaptive Validator Churn Limit* and the *Issuance Reserve*. The explanation follows.

Adaptive Validator Churn Limit

In a blockchain Proof of Stake (PoS) system, the 'churn limit' refers to the maximum number of validators permitted to enter or exit the network within a specific period. We observed that our feedback system fails to reach a stable status when the number of newly activated validators is equal to the number of exiting validators. To address this, we have implemented an *Adaptive Validator Churn Limit* that adjusts based on current conditions. Specifically, if the number of active validators exceeds the target, the limit for exiting validators is set higher than that for incoming validators. Conversely, if the number of active validators falls below the target, the limit for incoming validators is increased beyond that for exiting ones. This approach effectively alters the number of active validators as needed and facilitates adjustments when the number of active validators is either above or below the target, thus promoting a more flexible and efficient system operation.

The Issuance Reserve

The primary function of the issuance reserve is to manage the allocation of additional rewards when needed. Specifically, the reserve is pre-allocated 100 million OVER and serves as a resource for the feedback system to augment rewards when necessary. If the system determines that more rewards should be distributed, the additional amount is provided from this reserve. However, no more than this pre-allocated amount will be issued. The management of the issuance reserve is handled at the protocol level, protecting it from risks such as account hacking and ensuring its use is strictly limited to the dynamic adjustment of yields.

- + \ No newline at end of file diff --git a/learn/key-features/tokenomics/overview.html b/learn/key-features/tokenomics/overview.html index 87f9f82..a1dfe37 100644 --- a/learn/key-features/tokenomics/overview.html +++ b/learn/key-features/tokenomics/overview.html @@ -4,13 +4,13 @@ Tokenomics Overview | OverProtocol Docs - +

Tokenomics Overview

Principles of Design

The OverProtocol is dedicated to crafting a robust tokenomics framework, guided by two fundamental principles throughout its design.

Firstly, the tokenomics should enhance the security of the blockchain. In Proof of Stake (PoS) systems, the higher the value of tokens protecting the network, the more resilient it becomes against potential attacks. Thus, the system must provide adequate incentives to attract a substantial number of validators who secure the network with their tokens.

Second, the tokenomics should emphasize stability, creating a dependable environment that fosters user engagement with the asset and its underlying network. Stability boosts user confidence and equips the system to handle challenges effectively. By being less vulnerable to rapid changes, the system gains the resilience to respond to external influences and facilitate recovery.

With these principles as our guide, we meticulously develop our tokenomics strategy, covering allocation, issuance, fees, yield, and other critical elements to ensure alignment with these core tenets. Let’s delve into each of these key components in detail.

OVER Token

The native token of OverProtocol is 'Over,' with the symbol 'OVER.' This is the primary currency required for participating in and utilizing the OverProtocol. While there are other tokens on OverProtocol, OVER is the most essential to the protocol’s operations. It facilitates transactions by covering gas fees and is crucial to network security. Users participate in the Proof of Stake (PoS) consensus by staking OVER, contributing to the network’s resilience and trustworthiness.

- + \ No newline at end of file diff --git a/operators.html b/operators.html index cf9a8c1..2d6683f 100644 --- a/operators.html +++ b/operators.html @@ -4,13 +4,13 @@ Operator Guides | OverProtocol Docs - +

Operator Guides

Running your own node on the OverProtocol blockchain is not just about participating in the network; it’s about actively contributing to the stability and decentralization of the ecosystem. Nodes play a critical role in processing transactions and validating blocks, making the network more resilient and trustworthy. This guide provides a comprehensive overview of setting up your own node and becoming a validator in the OverProtocol ecosystem.

Benefits of Running Your Own Node

  • Increased Trust and Security: Operating your own node allows you to independently verify transactions without relying on third-party services.
  • Support for the Network: By running a node, you contribute to the network’s health and decentralization, reducing the risk of central points of failure.
  • Direct Participation in Consensus: As a validator, you play a part in the consensus process, influencing the network’s integrity and progression.
  • Potential Rewards: Validators who actively participate in consensus can earn rewards, incentivizing the maintenance and operation of the network. Also by running a restoration client, you can help users restore expired accounts and receive additional rewards.

Step-by-Step Guide to Setting Up Your Node

  1. Check Hardware Requirements: Ensure you have the necessary hardware that meets the specifications required for running a node on OverProtocol.
  2. Software Installation: Follow the installation instructions specific to your operating system.
  3. Syncing the Blockchain: Before your node can start validating, it must sync with the existing blockchain data. This process can take several minutes to hours, depending on the network's size and your internet speed.
  4. Register as a Validator: Once your node is set up and fully synced, you need to register as a validator. This involves locking up a certain amount of the OVER tokens as a stake, signifying your commitment to the network’s integrity.
  5. Start Validating: With your node running and registered as a validator, you will begin to participate in the creation and validation of blocks. Monitor your node’s performance and participate in the OverProtocol blockchain consensus as required.

Maintaining Your Node

  • Regular Updates: Keep your node software updated to the latest version to ensure compatibility with network changes and enhancements.
  • Security Practices: Implement strong security practices to protect your node from unauthorized access and potential threats. This includes securing SSH access, and regularly updating your operating system and software.
  • Monitoring and Alerts: Set up monitoring tools to keep track of your node’s operation and health. Configure alerts for downtime or performance issues to quickly address potential problems.

Contributing to OverProtocol’s Ecosystem

Running your own node goes beyond technical setup and into active ecosystem participation. Engage with the community, provide feedback, and contribute to discussions about future upgrades and directions. By being an active participant, you help shape the evolution of OverProtocol.

- + \ No newline at end of file diff --git a/operators/CLI-options/chronos.html b/operators/CLI-options/chronos.html index 2cc5589..5ac0650 100644 --- a/operators/CLI-options/chronos.html +++ b/operators/CLI-options/chronos.html @@ -4,13 +4,13 @@ Command Line Options | OverProtocol Docs - +

Command Line Options

beacon-chain

The Chronos beacon-chain binary is the node client responsible for the consensus layer in the Over Protocol. The beacon-chain allows users to modify various settings according to their needs, and a description of these settings can be displayed using the help command as shown below.

beacon-chain help
NAME:
beacon-chain - this is a beacon chain implementation for Over Protocol
USAGE:
beacon-chain [options] command [command options] [arguments...]

AUTHOR:


GLOBAL OPTIONS:
db defines commands for interacting with the Over Protocol Beacon Node database
generate-auth-secret creates a random, 32 byte hex string in a plaintext file to be used for authenticating JSON-RPC requests. If no --output-file flag is defined, the file will be created in the current working directory
help, h Shows a list of commands or help for one command

cmd OPTIONS:
--accept-terms-of-use Accept Terms and Conditions (for non-interactive environments) (default: false)
--api-timeout value Specifies the timeout value for API requests in seconds (default: 120)
--bootstrap-node value [ --bootstrap-node value ] The address of bootstrap node. Beacon node will connect for peer discovery via DHT. Multiple nodes can be passed by using the flag multiple times but not comma-separated. You can also pass YAML files containing multiple nodes.
--chain-config-file value The path to a YAML file with chain config values
--clear-db Prompt for clearing any previously stored data at the data directory (default: false)
--config-file value The filepath to a yaml file with flag values
--datadir value Data directory for the databases (default: "${HOME}/Eth2")
--db-backup-output-dir value Output directory for db backups
--disable-monitoring Disable monitoring service. (default: false)
--e2e-config Use the E2E testing config, only for use within end-to-end testing. (default: false)
--enable-tracing Enable request tracing. (default: false)
--force-clear-db Clear any previously stored data at the data directory (default: false)
--grpc-max-msg-size value Integer to define max receive message call size. If serving a public gRPC server, set this to a more reasonable size to avoid resource exhaustion from large messages. Validators with as many as 10000 keys can be run with a max message size of less than 50Mb. The default here is set to a very high value for local users. (default: 2147483647 (2Gi)). (default: 2147483647)
--max-goroutines value Specifies the upper limit of goroutines running before a status check fails (default: 5000)
--minimal-config Use minimal config with parameters as defined in the spec. (default: false)
--monitor-indices value [ --monitor-indices value ] List of validator indices to track performance
--monitoring-host value Host used for listening and responding metrics for prometheus. (default: "127.0.0.1")
--monitoring-port value Port used to listening and respond metrics for prometheus. (default: 8080)
--no-discovery Enable only local network p2p and do not connect to cloud bootstrap nodes. (default: false)
--p2p-tcp-port value The port used by libp2p. (default: 13000)
--p2p-udp-port value The port used by discv5. (default: 12000)
--relay-node value The address of relay node. The beacon node will connect to the relay node and advertise their address via the relay node to other peers
--restore-source-file value Filepath to the backed-up database file which will be used to restore the database
--restore-target-dir value Target directory of the restored database (default: "${HOME}/Eth2")
--rpc-max-page-size value Max number of items returned per page in RPC responses for paginated endpoints. (default: 0)
--trace-sample-fraction value Indicate what fraction of p2p messages are sampled for tracing. (default: 0.2)
--tracing-endpoint value Tracing endpoint defines where beacon chain traces are exposed to Jaeger. (default: "http://127.0.0.1:14268/api/traces")
--tracing-process-name value The name to apply to tracing tag "process_name"
--verbosity value Logging verbosity (trace, debug, info=default, warn, error, fatal, panic) (default: "info")

debug OPTIONS:
--blockprofilerate value Turn on block profiling with the given rate (default: 0)
--cpuprofile value Write CPU profile to the given file
--memprofilerate value Turn on memory profiling with the given rate (default: 524288)
--mutexprofilefraction value Turn on mutex profiling with the given rate (default: 0)
--pprof Enable the pprof HTTP server (default: false)
--pprofaddr value pprof HTTP server listening interface (default: "127.0.0.1")
--pprofport value pprof HTTP server listening port (default: 6060)
--trace value Write execution trace to the given file

beacon-chain OPTIONS:
--block-batch-limit value The amount of blocks the local peer is bounded to request and respond to in a batch. (default: 64)
--block-batch-limit-burst-factor value The factor by which block batch limit may increase on burst. (default: 2)
--chain-id value Sets the chain id of the beacon chain (default: 0)
--checkpoint-block value Rather than syncing from genesis, you can start processing from a ssz-serialized BeaconState+Block. This flag allows you to specify a local file containing the checkpoint Block to load.
--checkpoint-state value Rather than syncing from genesis, you can start processing from a ssz-serialized BeaconState+Block. This flag allows you to specify a local file containing the checkpoint BeaconState to load.
--checkpoint-sync-url value URL of a synced beacon node to trust in obtaining checkpoint sync data. As an additional safety measure, it is strongly recommended to only use this option in conjunction with --weak-subjectivity-checkpoint flag
--contract-deployment-block value The eth1 block in which the deposit contract was deployed. (default: 11184524)
--deposit-contract value Deposit contract address. Beacon chain node will listen logs coming from the deposit contract to determine when validator is eligible to participate. (default: "000000000000000000000000000000000beac017")
--disable-grpc-gateway Disable the gRPC gateway for JSON-HTTP requests (default: false)
--enable-debug-rpc-endpoints Enables the debug rpc service, containing utility endpoints such as /eth/v1alpha1/beacon/state. (default: false)
--enable-over-node-rpc-endpoints Enables the OverNode rpc service, containing utility endpoints such as /v2/over-node. (default: false)
--engine-endpoint-timeout-seconds value Sets the execution engine timeout (seconds) for execution payload semantics (forkchoiceUpdated, newPayload) (default: 0)
--eth1-header-req-limit value Sets the maximum number of headers that a deposit log query can fetch. (default: 1000)
--execution-endpoint value An execution client http endpoint. Can contain auth header as well in the format (default: "http://localhost:8551")
--execution-headers value A comma separated list of key value pairs to pass as HTTP headers for all execution client calls. Example: --execution-headers=key1=value1,key2=value2
--gc-percent value The percentage of freshly allocated data to live data on which the gc will be run again. (default: 100)
--genesis-beacon-api-url value URL of a synced beacon node to trust for obtaining genesis state. As an additional safety measure, it is strongly recommended to only use this option in conjunction with --weak-subjectivity-checkpoint flag
--genesis-state value Load a genesis state from ssz file. Testnet genesis files can be found in the eth2-clients/eth2-testnets repository on github.
--grpc-gateway-corsdomain value Comma separated list of domains from which to accept cross origin requests (browser enforced). This flag has no effect if not used with --grpc-gateway-port. (default: "http://localhost:4200,http://localhost:7500,http://127.0.0.1:4200,http://127.0.0.1:7500,http://0.0.0.0:4200,http://0.0.0.0:7500,http://localhost:3000,http://0.0.0.0:3000,http://127.0.0.1:3000")
--grpc-gateway-host value The host on which the gateway server runs on (default: "127.0.0.1")
--grpc-gateway-port value The port on which the gateway server runs on (default: 3500)
--historical-slasher-node Enables required flags for serving historical data to a slasher client. Results in additional storage usage (default: false)
--http-mev-relay value A MEV builder relay string http endpoint, this wil be used to interact MEV builder network using API defined in: https://ethereum.github.io/builder-specs/#/Builder
--http-modules prysm,eth Comma-separated list of API module names. Possible values: prysm,eth. (default: "prysm,eth")
--interop-eth1data-votes Enable mocking of eth1 data votes for proposers to package into blocks (default: false)
--jwt-secret value REQUIRED if connecting to an execution node via HTTP. Provides a path to a file containing a hex-encoded string representing a 32 byte secret used for authentication with an execution node via HTTP. If this is not set, all requests to execution nodes via HTTP for consensus-related calls will fail, which will prevent your validators from performing their duties. This is not required if using an IPC connection.
--max-builder-consecutive-missed-slots value Number of consecutive skip slot to fallback from using relay/builder to local execution engine for block construction (default: 3)
--max-builder-epoch-missed-slots value Number of total skip slot to fallback from using relay/builder to local execution engine for block construction in last epoch rolling window (default: 8)
--minimum-peers-per-subnet value Sets the minimum number of peers that a node will attempt to peer with that are subscribed to a subnet. (default: 6)
--network-id value Sets the network id of the beacon chain. (default: 0)
--rpc-host value Host on which the RPC server should listen (default: "127.0.0.1")
--rpc-port value RPC port exposed by a beacon node (default: 4000)
--slasher-datadir value Directory for the slasher database (default: "${HOME}/Eth2")
--slots-per-archive-point value The slot durations of when an archived state gets saved in the beaconDB. (default: 2048)
--subscribe-all-subnets Subscribe to all possible attestation and sync subnets. (default: false)
--tls-cert value Certificate for secure gRPC. Pass this and the tls-key flag in order to use gRPC securely.
--tls-key value Key for secure gRPC. Pass this and the tls-cert flag in order to use gRPC securely.
--weak-subjectivity-checkpoint block_root:epoch_number Input in block_root:epoch_number format. This guarantees that syncing leads to the given Weak Subjectivity Checkpoint along the canonical chain. If such a sync is not possible, the node will treat it as a critical and irrecoverable failure

merge OPTIONS:
--suggested-fee-recipient value Post bellatrix, this address will receive the transaction fees produced by any blocks from this node. Default to junk whilst bellatrix is in development state. Validator client can override this value through the preparebeaconproposer api. (default: "0x0000000000000000000000000000000000000000")
--terminal-block-hash-epoch-override value Sets the block hash epoch to manual overrides the default TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH value. WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal block hash activation epoch. Incorrect usage will result in your node experience consensus failure. (default: 0)
--terminal-block-hash-override value Sets the block hash to manual overrides the default TERMINAL_BLOCK_HASH value. WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal block hash. Incorrect usage will result in your node experience consensus failure.
--terminal-total-difficulty-override value Sets the total difficulty to manual overrides the default TERMINAL_TOTAL_DIFFICULTY value. WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal difficulty. Incorrect usage will result in your node experience consensus failure.

p2p OPTIONS:
--enable-upnp Enable the service (Beacon chain or Validator) to use UPnP when possible. (default: false)
--min-sync-peers value The required number of valid peers to connect with before syncing. (default: 3)
--p2p-allowlist value The CIDR subnet for allowing only certain peer connections. Using "public" would allow only public subnets. Example: 192.168.0.0/16 would permit connections to peers on your local network only. The default is to accept all connections.
--p2p-colocation-limit value The maximum number of peers we can see from a single ip or ipv6 subnet. (default: 5)
--p2p-colocation-whitelist value [ --p2p-colocation-whitelist value ] Whitelist of CIDR subnets that not scoring peer with IP-colocation factor.
--p2p-denylist value [ --p2p-denylist value ] The CIDR subnets for denying certainty peer connections. Using "private" would deny all private subnets. Example: 192.168.0.0/16 would deny connections from peers on your local network only. The default is to accept all connections.
--p2p-host-dns value The DNS address advertised by libp2p. This may be used to advertise an external DNS.
--p2p-host-ip value The IP address advertised by libp2p. This may be used to advertise an external IP.
--p2p-ip-tracker-ban-time value The interval in minutes to prune the ip tracker, default is 120m (default: 2h0m0s)
--p2p-local-ip value The local ip address to listen for incoming data.
--p2p-max-peers value The max number of p2p peers to maintain. (default: 70)
--p2p-metadata value The file containing the metadata to communicate with other peers.
--p2p-priv-key value The file containing the private key to use in communications with other peers.
--p2p-static-id Enables the peer id of the node to be fixed by saving the generated network key to the default key path. (default: false)
--peer value [ --peer value ] Connect with this peer, this flag may be used multiple times. This peer is recognized as a trusted peer.

log OPTIONS:
--log-compress Compress the log files (default: false)
--log-file value Specify log file name, relative or absolute
--log-format value Specify log formatting. Supports: text, json, fluentd, journald. (default: "text")
--log-maxage value Maximum number of days to retain a log file (default: 30)
--log-maxbackups value Maximum number of log files to retain (default: 10)
--log-maxsize value Maximum size in MBs of a single log file (default: 100)
--log-rotate Enables log file rotation (default: false)

features OPTIONS:
--aggregate-parallel Enables parallel aggregation of attestations (default: false)
--dev Enable experimental features still in development. These features may not be stable. (default: false)
--diable-registration-cache A temporary flag for disabling the validator registration cache instead of using the db. note: registrations do not clear on restart while using the db (default: false)
--disable-broadcast-slashings Disables broadcasting slashings submitted to the beacon node. (default: false)
--disable-build-block-parallel Disables building a beacon block in parallel for consensus and execution (default: false)
--disable-check-bad-peer (Danger): Disables checking if a peer is bad. Do NOT use this in production! (default: false)
--disable-grpc-connection-logging Disables displaying logs for newly connected grpc clients (default: false)
--disable-peer-scorer (Danger): Disables P2P peer scorer. Do NOT use this in production! (default: false)
--disable-reorg-late-blocks Disables reorgs of late blocks (default: false)
--disable-resource-manager Disables running the libp2p resource manager (default: false)
--disable-staking-contract-check Disables checking of staking contract deposits when proposing blocks, useful for devnets (default: false)
--dolphin Run Chronos configured for the Dolphin test network (default: false)
--enable-full-ssz-data-logging Enables displaying logs for full ssz data on rejected gossip messages (default: false)
--enable-historical-state-representation Enables the beacon chain to save historical states in a space efficient manner. (Warning): Once enabled, this feature migrates your database in to a new schema and there is no going back. At worst, your entire database might get corrupted. (default: false)
--enable-optional-engine-methods Enables the optional engine methods (default: false)
--enable-verbose-sig-verification Enables identifying invalid signatures if batch verification fails when processing block (default: false)
--interop-write-ssz-state-transitions Write ssz states to disk after attempted state transition (default: false)
--mainnet Run on Over Protocol Beacon Chain Main Net. This is the default and can be omitted. (default: true)
--prepare-all-payloads Informs the engine to prepare all local payloads. Useful for relayers and builders (default: false)
--save-full-execution-payloads Saves beacon blocks with full execution payloads instead of execution payload headers in the database (default: false)
--slasher Enables a slasher in the beacon node for detecting slashable offenses (default: false)

interop OPTIONS:
--genesis-state value Load a genesis state from ssz file. Testnet genesis files can be found in the eth2-clients/eth2-testnets repository on github.
--interop-genesis-time value Specify the genesis time for interop genesis state generation. Must be used with --interop-num-validators (default: 0)
--interop-num-validators value Specify number of genesis validators to generate for interop. Must be used with --interop-genesis-time (default: 0)

Validator

validator help
NAME:
validator - launches an Over Protocol validator client that interacts with a beacon chain, starts proposer and attester services, p2p connections, and more
USAGE:
validator [options] command [command options] [arguments...]

AUTHOR:


GLOBAL OPTIONS:
wallet defines commands for interacting with Over Protocol validator wallets
accounts defines commands for interacting with Over Protocol validator accounts
slashing-protection-history defines commands for interacting your validator's slashing protection history
db defines commands for interacting with the Chronos validator database
help, h Shows a list of commands or help for one command

cmd OPTIONS:
--accept-terms-of-use Accept Terms and Conditions (for non-interactive environments) (default: false)
--api-timeout value Specifies the timeout value for API requests in seconds (default: 120)
--chain-config-file value The path to a YAML file with chain config values
--clear-db Prompt for clearing any previously stored data at the data directory (default: false)
--config-file value The filepath to a yaml file with flag values
--datadir value Data directory for the databases (default: "${HOME}/Eth2")
--db-backup-output-dir value Output directory for db backups
--disable-monitoring Disable monitoring service. (default: false)
--e2e-config Use the E2E testing config, only for use within end-to-end testing. (default: false)
--enable-db-backup-webhook Serve HTTP handler to initiate database backups. The handler is served on the monitoring port at path /db/backup. (default: false)
--enable-tracing Enable request tracing. (default: false)
--force-clear-db Clear any previously stored data at the data directory (default: false)
--grpc-max-msg-size value Integer to define max receive message call size. If serving a public gRPC server, set this to a more reasonable size to avoid resource exhaustion from large messages. Validators with as many as 10000 keys can be run with a max message size of less than 50Mb. The default here is set to a very high value for local users. (default: 2147483647 (2Gi)). (default: 2147483647)
--log-compress Compress the log files (default: false)
--log-file value Specify log file name, relative or absolute
--log-format value Specify log formatting. Supports: text, json, fluentd, journald. (default: "text")
--log-maxage value Maximum number of days to retain a log file (default: 30)
--log-maxbackups value Maximum number of log files to retain (default: 10)
--log-maxsize value Maximum size in MBs of a single log file (default: 100)
--log-rotate Enables log file rotation (default: false)
--minimal-config Use minimal config with parameters as defined in the spec. (default: false)
--monitoring-host value Host used for listening and responding metrics for prometheus. (default: "127.0.0.1")
--monitoring-port value Port used to listening and respond metrics for prometheus. (default: 8081)
--trace-sample-fraction value Indicate what fraction of p2p messages are sampled for tracing. (default: 0.2)
--tracing-endpoint value Tracing endpoint defines where beacon chain traces are exposed to Jaeger. (default: "http://127.0.0.1:14268/api/traces")
--tracing-process-name value The name to apply to tracing tag "process_name"
--verbosity value Logging verbosity (trace, debug, info=default, warn, error, fatal, panic) (default: "info")

debug OPTIONS:
--blockprofilerate value Turn on block profiling with the given rate (default: 0)
--cpuprofile value Write CPU profile to the given file
--memprofilerate value Turn on memory profiling with the given rate (default: 524288)
--mutexprofilefraction value Turn on mutex profiling with the given rate (default: 0)
--pprof Enable the pprof HTTP server (default: false)
--pprofaddr value pprof HTTP server listening interface (default: "127.0.0.1")
--pprofport value pprof HTTP server listening port (default: 6060)
--trace value Write execution trace to the given file

validator OPTIONS:
--beacon-rest-api-provider value Beacon node REST API provider endpoint (default: "http://127.0.0.1:3500")
--beacon-rpc-gateway-provider value Beacon node RPC gateway provider endpoint (default: "127.0.0.1:3500")
--beacon-rpc-provider value Beacon node RPC provider endpoint (default: "127.0.0.1:4000")
--disable-account-metrics Disable prometheus metrics for validator accounts. Operators with high volumes of validating keys may wish to disable granular prometheus metrics as it increases the data cardinality. (default: false)
--disable-rewards-penalties-logging Disable reward/penalty logging during cluster deployment (default: false)
--enable-builder, --enable-validator-registration Enables Builder validator registration APIs for the validator client to update settings such as fee recipient and gas limit. Note* this flag is not required if using proposer settings config file (default: false)
--graffiti value String to include in proposed blocks
--graffiti-file value The path to a YAML file with graffiti values
--grpc-gateway-corsdomain value Comma separated list of domains from which to accept cross origin requests (browser enforced). This flag has no effect if not used with --grpc-gateway-port. (default: "http://localhost:7500,http://127.0.0.1:7500,http://0.0.0.0:7500,http://localhost:4242,http://127.0.0.1:4242,http://localhost:4200,http://0.0.0.0:4242,http://127.0.0.1:4200,http://0.0.0.0:4200,http://localhost:3000,http://0.0.0.0:3000,http://127.0.0.1:3000")
--grpc-gateway-host value The host on which the gateway server runs on (default: "127.0.0.1")
--grpc-gateway-port value Enable gRPC gateway for JSON requests (default: 7500)
--grpc-headers value A comma separated list of key value pairs to pass as gRPC headers for all gRPC calls. Example: --grpc-headers=key=value
--grpc-retries value Number of attempts to retry gRPC requests (default: 5)
--grpc-retry-delay value The amount of time between gRPC retry requests. (default: 1s)
--over-node Enables validator client for OverNode (default: false)
--proposer-settings-file value Set path to a YAML or JSON file containing validator settings used when proposing blocks such as (fee recipient and gas limit) (i.e. --proposer-settings-file=/path/to/proposer.json). File format found in docs
--proposer-settings-url value Set URL to a REST endpoint containing validator settings used when proposing blocks such as (fee recipient) (i.e. --proposer-settings-url=https://example.com/api/getConfig). File format found in docs
--rpc Enables the RPC server for the validator client (without Web UI) (default: false)
--rpc-host value Host on which the RPC server should listen (default: "127.0.0.1")
--rpc-port value RPC port exposed by a validator client (default: 7000)
--slasher-rpc-provider value Slasher node RPC provider endpoint (default: "127.0.0.1:4002")
--slasher-tls-cert value Certificate for secure slasher gRPC. Pass this and the tls-key flag in order to use gRPC securely.
--suggested-fee-recipient value Sets ALL validators' mapping to a suggested eth address to receive gas fees when proposing a block. note that this is only a suggestion when integrating with a Builder API, which may choose to specify a different fee recipient as payment for the blocks it builds. For additional setting overrides use the --proposer-settings-file or --proposer-settings-url Flags. (default: "0x0000000000000000000000000000000000000000")
--suggested-gas-limit value Sets gas limit for the builder to use for constructing a payload for all the validators (default: "30000000")
--tls-cert value Certificate for secure gRPC. Pass this and the tls-key flag in order to use gRPC securely.
--wallet-dir value Path to a wallet directory on-disk for Chronos validator accounts (default: "${HOME}/Eth2Validators/chronos-wallet-v2")
--wallet-password-file value Path to a plain-text, .txt file containing your wallet password

features OPTIONS:
--attest-timely Fixes validator can attest timely after current block processes. See #8185 for more details (default: false)
--dolphin Run Chronos configured for the Dolphin test network (default: false)
--dynamic-key-reload-debounce-interval value (Advanced): Specifies the time duration the validator waits to reload new keys if they have changed on disk. Default 1s, can be any type of duration such as 1.5s, 1000ms, 1m. (default: 1s)
--enable-beacon-rest-api Experimental enable of the beacon REST API when querying a beacon node (default: false)
--enable-doppelganger Enables the validator to perform a doppelganger check. (Warning): This is not a foolproof method to find duplicate instances in the network. Your validator will still be vulnerable if it is being run in unsafe configurations. (default: false)
--enable-external-slasher-protection Enables the validator to connect to a beacon node using the --slasher flagfor remote slashing protection (default: false)
--enable-slashing-protection-history-pruning Enables the pruning of the validator client's slashing protection database (default: false)
--mainnet Run on Over Protocol Beacon Chain Main Net. This is the default and can be omitted. (default: true)

interop OPTIONS:
--interop-num-validators value The number of validators to deterministically generate. Example: --interop-start-index=5 --interop-num-validators=3 would generate keys from index 5 to 7. (default: 0)
--interop-start-index value The start index to deterministically generate validator keys when used in combination with --interop-num-validators. Example: --interop-start-index=5 --interop-num-validators=3 would generate keys from index 5 to 7. (default: 0)


- + \ No newline at end of file diff --git a/operators/CLI-options/kairos.html b/operators/CLI-options/kairos.html index e6dafc5..204cfd0 100644 --- a/operators/CLI-options/kairos.html +++ b/operators/CLI-options/kairos.html @@ -4,13 +4,13 @@ Command Line Options | OverProtocol Docs - +

Command Line Options

geth

As the Kairos project is a fork of the Geth project, it provides most of the command line options available in geth. You can obtain information and descriptions of these options by using the command line help in the Geth binary.

geth --help                                                                                                            
NAME:
geth - the kairos command line interface

USAGE:
geth [global options] command [command options] [arguments...]

VERSION:
1.0.3-unstable-ff8c2d62-20240605
COMMANDS:
account Manage accounts
attach Start an interactive JavaScript environment (connect to node)
console Start an interactive JavaScript environment
db Low level database operations
dump Dump a specific block from storage
dumpconfig Export configuration values in a TOML format
dumpgenesis Dumps genesis block JSON configuration to stdout
export Export blockchain into file
import Import a blockchain file
import-preimages Import the preimage database from an RLP stream
init Bootstrap and initialize a new genesis block
js (DEPRECATED) Execute the specified JavaScript files
license Display license information
removedb Remove blockchain and state databases
show-deprecated-flags Show flags that have been deprecated
snapshot A set of commands based on the snapshot
verkle A set of experimental verkle tree management commands
version Print version numbers
version-check Checks (online) for known Geth security vulnerabilities
help, h Shows a list of commands or help for one command

GLOBAL OPTIONS:
ACCOUNT


--allow-insecure-unlock (default: false) ($GETH_ALLOW_INSECURE_UNLOCK)
Allow insecure account unlocking when account-related RPCs are exposed by http

--keystore value ($GETH_KEYSTORE)
Directory for the keystore (default = inside the datadir)

--lightkdf (default: false) ($GETH_LIGHTKDF)
Reduce key-derivation RAM & CPU usage at some expense of KDF strength

--password value ($GETH_PASSWORD)
Password file to use for non-interactive password input

--pcscdpath value ($GETH_PCSCDPATH)
Path to the smartcard daemon (pcscd) socket file

--signer value ($GETH_SIGNER)
External signer (url or path to ipc file)

--unlock value ($GETH_UNLOCK)
Comma separated list of accounts to unlock

--usb (default: false) ($GETH_USB)
Enable monitoring and management of USB hardware wallets

ALIASED (deprecated)


--cache.trie.journal value ($GETH_CACHE_TRIE_JOURNAL)
Disk journal directory for trie cache to survive node restarts

--cache.trie.rejournal value (default: 0s) ($GETH_CACHE_TRIE_REJOURNAL)
Time interval to regenerate the trie cache journal

--log.backtrace value ($GETH_LOG_BACKTRACE)
Request a stack trace at a specific logging statement (deprecated)

--log.debug (default: false) ($GETH_LOG_DEBUG)
Prepends log messages with call-site location (deprecated)

--nousb (default: false) ($GETH_NOUSB)
Disables monitoring for and managing USB hardware wallets (deprecated)

--txlookuplimit value (default: 2350000) ($GETH_TXLOOKUPLIMIT)
Number of recent blocks to maintain transactions index for (default = about one
year, 0 = entire chain) (deprecated, use history.transactions instead)

--v5disc (default: false) ($GETH_V5DISC)
Enables the experimental RLPx V5 (Topic Discovery) mechanism (deprecated, use
--discv5 instead)

--whitelist value ($GETH_WHITELIST)
Comma separated block number-to-hash mappings to enforce (<number>=<hash>)
(deprecated in favor of --eth.requiredblocks)

API AND CONSOLE


--authrpc.addr value (default: "localhost") ($GETH_AUTHRPC_ADDR)
Listening address for authenticated APIs

--authrpc.jwtsecret value ($GETH_AUTHRPC_JWTSECRET)
Path to a JWT secret to use for authenticated RPC endpoints

--authrpc.port value (default: 8551) ($GETH_AUTHRPC_PORT)
Listening port for authenticated APIs

--authrpc.vhosts value (default: "localhost") ($GETH_AUTHRPC_VHOSTS)
Comma separated list of virtual hostnames from which to accept requests (server
enforced). Accepts '*' wildcard.

--exec value ($GETH_EXEC)
Execute JavaScript statement

--graphql (default: false) ($GETH_GRAPHQL)
Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if
an HTTP server is started as well.

--graphql.corsdomain value ($GETH_GRAPHQL_CORSDOMAIN)
Comma separated list of domains from which to accept cross origin requests
(browser enforced)

--graphql.vhosts value (default: "localhost") ($GETH_GRAPHQL_VHOSTS)
Comma separated list of virtual hostnames from which to accept requests (server
enforced). Accepts '*' wildcard.

--header value, -H value ($GETH_HEADER)
Pass custom headers to the RPC server when using --remotedb or the geth attach
console. This flag can be given multiple times.

--http (default: false) ($GETH_HTTP)
Enable the HTTP-RPC server

--http.addr value (default: "localhost") ($GETH_HTTP_ADDR)
HTTP-RPC server listening interface

--http.api value ($GETH_HTTP_API)
API's offered over the HTTP-RPC interface

--http.corsdomain value ($GETH_HTTP_CORSDOMAIN)
Comma separated list of domains from which to accept cross origin requests
(browser enforced)

--http.port value (default: 8545) ($GETH_HTTP_PORT)
HTTP-RPC server listening port

--http.rpcprefix value ($GETH_HTTP_RPCPREFIX)
HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all
paths.

--http.vhosts value (default: "localhost") ($GETH_HTTP_VHOSTS)
Comma separated list of virtual hostnames from which to accept requests (server
enforced). Accepts '*' wildcard.

--ipcdisable (default: false) ($GETH_IPCDISABLE)
Disable the IPC-RPC server

--ipcpath value ($GETH_IPCPATH)
Filename for IPC socket/pipe within the datadir (explicit paths escape it)

--jspath value (default: .) ($GETH_JSPATH)
JavaScript root path for `loadScript`

--preload value ($GETH_PRELOAD)
Comma separated list of JavaScript files to preload into the console

--rpc.allow-unprotected-txs (default: false) ($GETH_RPC_ALLOW_UNPROTECTED_TXS)
Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC

--rpc.batch-request-limit value (default: 1000) ($GETH_RPC_BATCH_REQUEST_LIMIT)
Maximum number of requests in a batch

--rpc.batch-response-max-size value (default: 25000000) ($GETH_RPC_BATCH_RESPONSE_MAX_SIZE)
Maximum number of bytes returned from a batched call

--rpc.enabledeprecatedpersonal (default: false) ($GETH_RPC_ENABLEDEPRECATEDPERSONAL)
Enables the (deprecated) personal namespace

--rpc.evmtimeout value (default: 5s) ($GETH_RPC_EVMTIMEOUT)
Sets a timeout used for eth_call (0=infinite)

--rpc.gascap value (default: 50000000) ($GETH_RPC_GASCAP)
Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)

--rpc.txfeecap value (default: 1) ($GETH_RPC_TXFEECAP)
Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 =
no cap)

--ws (default: false) ($GETH_WS)
Enable the WS-RPC server

--ws.addr value (default: "localhost") ($GETH_WS_ADDR)
WS-RPC server listening interface

--ws.api value ($GETH_WS_API)
API's offered over the WS-RPC interface

--ws.origins value ($GETH_WS_ORIGINS)
Origins from which to accept websockets requests

--ws.port value (default: 8546) ($GETH_WS_PORT)
WS-RPC server listening port

--ws.rpcprefix value ($GETH_WS_RPCPREFIX)
HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.

DEVELOPER CHAIN


--dev (default: false) ($GETH_DEV)
Ephemeral proof-of-authority network with a pre-funded developer account, mining
enabled

--dev.gaslimit value (default: 11500000) ($GETH_DEV_GASLIMIT)
Initial block gas limit

--dev.period value (default: 0) ($GETH_DEV_PERIOD)
Block period to use in developer mode (0 = mine only if transaction pending)

ETH


--bloomfilter.size value (default: 2048) ($GETH_BLOOMFILTER_SIZE)
Megabytes of memory allocated to bloom-filter for pruning

--config value ($GETH_CONFIG)
TOML configuration file

--creeper (default: false) ($GETH_CREEPER)
Creeper test network

--datadir value (default: ~/Library/OverProtocol) ($GETH_DATADIR)
Data directory for the databases and keystore

--datadir.ancient value ($GETH_DATADIR_ANCIENT)
Root directory for ancient data (default = inside chaindata)

--datadir.minfreedisk value ($GETH_DATADIR_MINFREEDISK)
Minimum free disk space in MB, once reached triggers auto shut down (default =
--cache.gc converted to MB, 0 = disabled)

--dolphin (default: false) ($GETH_DOLPHIN)
Dolphin test network

--eth.requiredblocks value ($GETH_ETH_REQUIREDBLOCKS)
Comma separated block number-to-hash mappings to require for peering
(<number>=<hash>)

--exitwhensynced (default: false) ($GETH_EXITWHENSYNCED)
Exits after block synchronisation completes

--mainnet (default: false) ($GETH_MAINNET)
Over Protocol mainnet

--networkid value (default: 0) ($GETH_NETWORKID)
Explicitly set network id (integer)(For testnets: use --creeper, --dolphin
instead)

--override.cancun value (default: 0) ($GETH_OVERRIDE_CANCUN)
Manually specify the Cancun fork timestamp, overriding the bundled setting

--override.verkle value (default: 0) ($GETH_OVERRIDE_VERKLE)
Manually specify the Verkle fork timestamp, overriding the bundled setting

--snapshot (default: true) ($GETH_SNAPSHOT)
Enables snapshot-database mode (default = enable)

GAS PRICE ORACLE


--gpo.blocks value (default: 20) ($GETH_GPO_BLOCKS)
Number of recent blocks to check for gas prices

--gpo.ignoreprice value (default: 2) ($GETH_GPO_IGNOREPRICE)
Gas price below which gpo will ignore transactions

--gpo.maxprice value (default: 500000000000) ($GETH_GPO_MAXPRICE)
Maximum transaction priority fee (or gasprice before London fork) to be
recommended by gpo

--gpo.percentile value (default: 60) ($GETH_GPO_PERCENTILE)
Suggested gas price is the given percentile of a set of recent transaction gas
prices

LIGHT CLIENT


--light.egress value (default: 0) ($GETH_LIGHT_EGRESS)
Outgoing bandwidth limit for serving light clients (deprecated)

--light.ingress value (default: 0) ($GETH_LIGHT_INGRESS)
Incoming bandwidth limit for serving light clients (deprecated)

--light.maxpeers value (default: 100) ($GETH_LIGHT_MAXPEERS)
Maximum number of light clients to serve, or light servers to attach to
(deprecated)

--light.nopruning (default: false) ($GETH_LIGHT_NOPRUNING)
Disable ancient light chain data pruning (deprecated)

--light.nosyncserve (default: false) ($GETH_LIGHT_NOSYNCSERVE)
Enables serving light clients before syncing (deprecated)

--light.serve value (default: 0) ($GETH_LIGHT_SERVE)
Maximum percentage of time allowed for serving LES requests (deprecated)

LOGGING AND DEBUGGING


--log.compress (default: false) ($GETH_LOG_COMPRESS)
Compress the log files

--log.file value ($GETH_LOG_FILE)
Write logs to a file

--log.format value ($GETH_LOG_FORMAT)
Log format to use (json|logfmt|terminal)

--log.maxage value (default: 30) ($GETH_LOG_MAXAGE)
Maximum number of days to retain a log file

--log.maxbackups value (default: 10) ($GETH_LOG_MAXBACKUPS)
Maximum number of log files to retain

--log.maxsize value (default: 100) ($GETH_LOG_MAXSIZE)
Maximum size in MBs of a single log file

--log.rotate (default: false) ($GETH_LOG_ROTATE)
Enables log file rotation

--log.vmodule value ($GETH_LOG_VMODULE)
Per-module verbosity: comma-separated list of <pattern>=<level> (e.g.
eth/*=5,p2p=4)

--nocompaction (default: false) ($GETH_NOCOMPACTION)
Disables db compaction after import

--pprof (default: false) ($GETH_PPROF)
Enable the pprof HTTP server

--pprof.addr value (default: "127.0.0.1") ($GETH_PPROF_ADDR)
pprof HTTP server listening interface

--pprof.blockprofilerate value (default: 0) ($GETH_PPROF_BLOCKPROFILERATE)
Turn on block profiling with the given rate

--pprof.cpuprofile value ($GETH_PPROF_CPUPROFILE)
Write CPU profile to the given file

--pprof.memprofilerate value (default: 524288) ($GETH_PPROF_MEMPROFILERATE)
Turn on memory profiling with the given rate

--pprof.port value (default: 6060) ($GETH_PPROF_PORT)
pprof HTTP server listening port

--remotedb value ($GETH_REMOTEDB)
URL for remote database

--trace value ($GETH_TRACE)
Write execution trace to the given file

--verbosity value (default: 3) ($GETH_VERBOSITY)
Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail

METRICS AND STATS


--ethstats value ($GETH_ETHSTATS)
Reporting URL of a ethstats service (nodename:secret@host:port)

--metrics (default: false) ($GETH_METRICS)
Enable metrics collection and reporting

--metrics.addr value ($GETH_METRICS_ADDR)
Enable stand-alone metrics HTTP server listening interface.

--metrics.expensive (default: false) ($GETH_METRICS_EXPENSIVE)
Enable expensive metrics collection and reporting

--metrics.influxdb (default: false) ($GETH_METRICS_INFLUXDB)
Enable metrics export/push to an external InfluxDB database

--metrics.influxdb.bucket value (default: "geth") ($GETH_METRICS_INFLUXDB_BUCKET)
InfluxDB bucket name to push reported metrics to (v2 only)

--metrics.influxdb.database value (default: "geth") ($GETH_METRICS_INFLUXDB_DATABASE)
InfluxDB database name to push reported metrics to

--metrics.influxdb.endpoint value (default: "http://localhost:8086") ($GETH_METRICS_INFLUXDB_ENDPOINT)
InfluxDB API endpoint to report metrics to

--metrics.influxdb.organization value (default: "geth") ($GETH_METRICS_INFLUXDB_ORGANIZATION)
InfluxDB organization name (v2 only)

--metrics.influxdb.password value (default: "test") ($GETH_METRICS_INFLUXDB_PASSWORD)
Password to authorize access to the database

--metrics.influxdb.tags value (default: "host=localhost") ($GETH_METRICS_INFLUXDB_TAGS)
Comma-separated InfluxDB tags (key/values) attached to all measurements

--metrics.influxdb.token value (default: "test") ($GETH_METRICS_INFLUXDB_TOKEN)
Token to authorize access to the database (v2 only)

--metrics.influxdb.username value (default: "test") ($GETH_METRICS_INFLUXDB_USERNAME)
Username to authorize access to the database

--metrics.influxdbv2 (default: false) ($GETH_METRICS_INFLUXDBV2)
Enable metrics export/push to an external InfluxDB v2 database

--metrics.port value (default: 6060) ($GETH_METRICS_PORT)
Metrics HTTP server listening port.
Please note that --metrics.addr must be set
to start the server.

MINER


--mine (default: false) ($GETH_MINE)
Enable mining

--miner.etherbase value ($GETH_MINER_ETHERBASE)
0x prefixed public address for block mining rewards

--miner.extradata value ($GETH_MINER_EXTRADATA)
Block extra data set by the miner (default = client version)

--miner.gaslimit value (default: 30000000) ($GETH_MINER_GASLIMIT)
Target gas ceiling for mined blocks

--miner.gasprice value (default: 0) ($GETH_MINER_GASPRICE)
Minimum gas price for mining a transaction

--miner.newpayload-timeout value (default: 2s) ($GETH_MINER_NEWPAYLOAD_TIMEOUT)
Specify the maximum time allowance for creating a new payload

--miner.recommit value (default: 2s) ($GETH_MINER_RECOMMIT)
Time interval to recreate the block being mined

MISC


--help, -h (default: false)
show help

--synctarget value ($GETH_SYNCTARGET)
Hash of the block to full sync to (dev testing feature)

--version, -v (default: false)
print the version

NETWORKING


--bootnodes value ($GETH_BOOTNODES)
Comma separated enode URLs for P2P discovery bootstrap

--discovery.dns value ($GETH_DISCOVERY_DNS)
Sets DNS discovery entry points (use "" to disable DNS)

--discovery.port value (default: 30303) ($GETH_DISCOVERY_PORT)
Use a custom UDP port for P2P discovery

--discovery.v4, --discv4 (default: false) ($GETH_DISCOVERY_V4)
Enables the V4 discovery mechanism

--discovery.v5, --discv5 (default: true) ($GETH_DISCOVERY_V5)
Enables the experimental RLPx V5 (Topic Discovery) mechanism

--identity value ($GETH_IDENTITY)
Custom node name

--maxpeers value (default: 50) ($GETH_MAXPEERS)
Maximum number of network peers (network disabled if set to 0)

--maxpendpeers value (default: 0) ($GETH_MAXPENDPEERS)
Maximum number of pending connection attempts (defaults used if set to 0)

--nat value (default: "any") ($GETH_NAT)
NAT port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)

--netrestrict value ($GETH_NETRESTRICT)
Restricts network communication to the given IP networks (CIDR masks)

--nodekey value ($GETH_NODEKEY)
P2P node key file

--nodekeyhex value ($GETH_NODEKEYHEX)
P2P node key as hex (for testing)

--nodiscover (default: false) ($GETH_NODISCOVER)
Disables the peer discovery mechanism (manual peer addition)

--port value (default: 30303) ($GETH_PORT)
Network listening port

PERFORMANCE TUNING


--cache value (default: 1024) ($GETH_CACHE)
Megabytes of memory allocated to internal caching (default = 1024 mainnet full
node, 128 light mode)

--cache.blocklogs value (default: 32) ($GETH_CACHE_BLOCKLOGS)
Size (in number of blocks) of the log cache for filtering

--cache.database value (default: 50) ($GETH_CACHE_DATABASE)
Percentage of cache memory allowance to use for database io

--cache.gc value (default: 25) ($GETH_CACHE_GC)
Percentage of cache memory allowance to use for trie pruning (default = 25% full
mode, 0% archive mode)

--cache.noprefetch (default: false) ($GETH_CACHE_NOPREFETCH)
Disable heuristic state prefetch during block import (less CPU and disk IO, more
time waiting for data)

--cache.preimages (default: false) ($GETH_CACHE_PREIMAGES)
Enable recording the SHA3/keccak preimages of trie keys

--cache.snapshot value (default: 10) ($GETH_CACHE_SNAPSHOT)
Percentage of cache memory allowance to use for snapshot caching (default = 10%
full mode, 20% archive mode)

--cache.trie value (default: 15) ($GETH_CACHE_TRIE)
Percentage of cache memory allowance to use for trie caching (default = 15% full
mode, 30% archive mode)

--crypto.kzg value (default: "gokzg") ($GETH_CRYPTO_KZG)
KZG library implementation to use; gokzg (recommended) or ckzg

--fdlimit value (default: 0) ($GETH_FDLIMIT)
Raise the open file descriptor resource limit (default = system fd limit)

STATE HISTORY MANAGEMENT


--epochlimit value (default: 2) ($GETH_EPOCHLIMIT)
Number of epochs to retain in database (default = 2, 0 = no limit)

--gcmode value (default: "full") ($GETH_GCMODE)
Blockchain garbage collection mode, only relevant in state.scheme=hash ("full",
"archive")

--history.disable (default: false) ($GETH_HISTORY_DISABLE)
Disable state history tracking

--history.state value (default: 90000) ($GETH_HISTORY_STATE)
Number of recent blocks to retain state history for (default = 90,000 blocks, 0
= entire chain)

--history.transactions value (default: 2350000) ($GETH_HISTORY_TRANSACTIONS)
Number of recent blocks to maintain transactions index for (default = about one
year, 0 = entire chain)

--state.scheme value ($GETH_STATE_SCHEME)
Scheme to use for storing kairos state ('hash' or 'path')

--syncmode value (default: snap) ($GETH_SYNCMODE)
Blockchain sync mode ("snap" or "full")

TRANSACTION POOL (BLOB)


--blobpool.datacap value (default: 10737418240) ($GETH_BLOBPOOL_DATACAP)
Disk space to allocate for pending blob transactions (soft limit)

--blobpool.datadir value (default: "blobpool") ($GETH_BLOBPOOL_DATADIR)
Data directory to store blob transactions in

--blobpool.pricebump value (default: 100) ($GETH_BLOBPOOL_PRICEBUMP)
Price bump percentage to replace an already existing blob transaction

TRANSACTION POOL (EVM)


--txpool.accountqueue value (default: 64) ($GETH_TXPOOL_ACCOUNTQUEUE)
Maximum number of non-executable transaction slots permitted per account

--txpool.accountslots value (default: 16) ($GETH_TXPOOL_ACCOUNTSLOTS)
Minimum number of executable transaction slots guaranteed per account

--txpool.globalqueue value (default: 1024) ($GETH_TXPOOL_GLOBALQUEUE)
Maximum number of non-executable transaction slots for all accounts

--txpool.globalslots value (default: 5120) ($GETH_TXPOOL_GLOBALSLOTS)
Maximum number of executable transaction slots for all accounts

--txpool.journal value (default: "transactions.rlp") ($GETH_TXPOOL_JOURNAL)
Disk journal for local transaction to survive node restarts

--txpool.lifetime value (default: 3h0m0s) ($GETH_TXPOOL_LIFETIME)
Maximum amount of time non-executable transaction are queued

--txpool.locals value ($GETH_TXPOOL_LOCALS)
Comma separated accounts to treat as locals (no flush, priority inclusion)

--txpool.nolocals (default: false) ($GETH_TXPOOL_NOLOCALS)
Disables price exemptions for locally submitted transactions

--txpool.pricebump value (default: 10) ($GETH_TXPOOL_PRICEBUMP)
Price bump percentage to replace an already existing transaction

--txpool.pricelimit value (default: 1) ($GETH_TXPOOL_PRICELIMIT)
Minimum gas price tip to enforce for acceptance into the pool

--txpool.rejournal value (default: 1h0m0s) ($GETH_TXPOOL_REJOURNAL)
Time interval to regenerate the local transaction journal

VIRTUAL MACHINE


--vmdebug (default: false) ($GETH_VMDEBUG)
Record information useful for VM and contract debugging

- + \ No newline at end of file diff --git a/operators/faqs.html b/operators/faqs.html index a53b9c2..132e19b 100644 --- a/operators/faqs.html +++ b/operators/faqs.html @@ -4,13 +4,13 @@ FAQs | OverProtocol Docs - +

FAQs

1. What does a validator do?

OverProtocol utilizes Proof of Stake (PoS) to reach consensus on the blockchain. In this system, randomly selected validators propose, prove, and guarantee the validity of blocks.

Validators of OverProtocol contribute to the blockchain by participating in block validation, block creation, and synchronization proof, and receive rewards accordingly.

2. What is block attestation, block proposal, and sync-committee?

Block attestation, block proposal, and sync committee are roles that validators in OverProtocol must perform. All validators participate in block attestation, and a randomly selected group of validators participates in block proposal and sync-committee. Validators who participate in block proposal and sync-committee can get more rewards if they perform their roles well.

3. How can I run a validator?

If you are an OverNode user, you can start the validator application in the Validator menu. Follow the instructions on the screen to register as a validator. If you are choosing to run the client software yourself, refer to the Operate Validators page.

You must stake 256 OVER(for testing) per validator, and you can apply for more validators depending on the OVER you have.

If you have completed the validator application through staking, it may take some time for the applied validator to be activated. The waiting time becomes longer as the number of validator applications from users increases.

Before validator activation, no separate rewards or penalties apply to validator operation. However, after the validator is activated, you must always keep the node running to receive rewards.

4. Validator activation takes a long time.

When staking for validator operation, a waiting time is required until the staking transaction is processed and validator registration is completed. Validator activation waiting time can be divided into two stages: primary and secondary.

  • Primary waiting: It is used to calculate the time it takes for the validator to be activated for about the first 2 hours. It is estimated to take about 2 hours, but the exact waiting time cannot be determined.

  • Secondary waiting: The exact waiting time until validator activation can be known and confirmed in the Validator menu. Once the validator is activated, you can participate in the network immediately.

In OBT #2, after successfully sending the staking transaction, depending on the network status and the number of validator operation applications, a waiting time of at least 2 hours to up to 3 days may occur.

If multiple users apply for validator operation, the secondary waiting time may increase significantly. Therefore, during this time, it is recommended to leave OverNode running in the background and keep the computer on.

If the node was turned off due to the waiting time, please make sure to turn on the node and complete synchronization to the completion state before the validator activation time. If the node is still off at the time of validator activation, you will immediately receive penalties.

5. I forgot the validator recovery phrase.

OverProtocol doesn't store any of the user's passwords. If you forgot the validator recovery phrase, you can't recover your access to the active validator.

- + \ No newline at end of file diff --git a/operators/operate-restoration-client.html b/operators/operate-restoration-client.html index 6dd48ed..99a7e65 100644 --- a/operators/operate-restoration-client.html +++ b/operators/operate-restoration-client.html @@ -4,13 +4,13 @@ Operate Restoration Client | OverProtocol Docs - +

Operate Restoration Client

To restore an expired account, you need to retrieve the proof of historical state. This requires running an execution client that stores historical state data. By operating both the execution client and the restoration client, you can help users restore expired accounts and receive additional rewards.

How to run a restoration client

Restoration client is controlled using the command line. Here’s how to set it up:

restoration --help                                                                                                             
Usage of restoration:
-corsdomain string
Comma separated list of domains from which to accept cross origin requests (browser enforced) (default "*")
-ipc string
The ipc endpoint of a local geth node
-keystore string
Directory for the keystore (default = inside the datadir)
-minimum-reward string
Minimum reward for sending restoration transaction (default "1000000000000000000")
-passphrase string
Passphrase file for unlocking signer account
-port string
Server listening port (default ":32311")
-rpc string
The rpc endpoint of a local or remote geth node
-signer string
Signer address for signing restoration transaction and receiving reward
caution

The execution client must be synced with full sync mode and store an unlimited number of epochs.

$ geth --syncmode full --epochLimit 0
- + \ No newline at end of file diff --git a/operators/operate-validators.html b/operators/operate-validators.html index 2efed9d..ae5cd3d 100644 --- a/operators/operate-validators.html +++ b/operators/operate-validators.html @@ -4,7 +4,7 @@ Operate Validators | OverProtocol Docs - + @@ -14,7 +14,7 @@ The execution layer's account needs 256 OVER per validator account it tries to enroll.

Then you should run the following code on your machine to send deposit transactions with the validator keys generated in step 2. The deposit contract's address is set to 0x000000000000000000000000000000000beac017 and the deposit contract ABI is set as the following link: DepositContract.abi.json.

const { ethers } = require("ethers"); // ethers.js v5

// JSON-RPC endpoint of the local Kairos (execution layer) node.
const provider = new ethers.providers.JsonRpcProvider(
  "http://127.0.0.1:22000"
); // RPC port of Kairos

// Pre-deployed deposit contract on OverProtocol.
const depositContractAddress = '0x000000000000000000000000000000000beac017';
const depositContractABI = require('./DepositContract.abi.json');

// Each validator registration requires a deposit of exactly 256 OVER.
const DEPOSIT_AMOUNT = ethers.utils.parseEther("256");

/**
 * Sends one deposit transaction per entry in deposit_data.json,
 * registering each validator key with the deposit contract.
 *
 * A failure on one deposit is logged and the loop continues, so a
 * single bad entry does not abort the remaining registrations.
 *
 * @param {string} privateKey - 0x-prefixed private key of the funded account.
 */
async function stake(privateKey) {
  const wallet = new ethers.Wallet(privateKey, provider);

  const stakingContract = new ethers.Contract(
    depositContractAddress,
    depositContractABI,
    wallet
  );

  // The validator keys you've generated from step 2.
  const depositDatas = require("./deposit_data.json");

  for (let i = 0; i < depositDatas.length; i++) {
    const { pubkey, withdrawal_credentials, signature, deposit_data_root } =
      depositDatas[i];

    // Both the send and the confirmation wait are inside the try so that
    // an error at either stage is caught per-transaction.
    try {
      const tx = await stakingContract.deposit(
        "0x" + pubkey,
        "0x" + withdrawal_credentials,
        "0x" + signature,
        "0x" + deposit_data_root,
        {
          value: DEPOSIT_AMOUNT,
          gasLimit: 2000000,
        }
      );

      const receipt = await tx.wait();
      console.log(`Transaction ${i + 1}:`);
      console.log(`Transaction Hash: ${receipt.transactionHash}`); // Search by transaction hash at OverView
    } catch (error) {
      console.error(`Error in transaction ${i + 1}: ${error.message}`);
    }
  }
}

// Don't leave the promise floating: surface any unexpected failure
// (bad key, unreachable RPC node, malformed deposit_data.json).
stake(YOUR_PRIVATE_KEY_WITH_0x_PREFIX).catch((err) => {
  console.error(err);
  process.exitCode = 1;
});

If you've succeeded in registering your validator to the blockchain, you should now run your validator software. Follow steps 4 and 5.

Run your validator

Transfer Validator Keys

Run validator client to import the validator keys with the command similar to the following:

$ validator accounts import --keys-dir=<path/to/your/validator/keys> --dolphin --wallet-dir=<path/to/your/wallet/directory>

If you successfully imported validator keys, the result will be:

Importing accounts, this may take a while...
Importing accounts... 100% [==========================================================] [0s:0s]
[2024-06-04 15:41:33] INFO local-keymanager: Successfully imported validator key(s) publicKeys=<YOUR_VALIDATOR_PUBKEYS>
[2024-06-04 15:41:33] INFO accounts: Imported accounts <YOUR_VALIDATOR_PUBKEYS>, view all of them by running `accounts list`

Run your Validator

Run validator client to run the validator on your node like following:

$ validator --wallet-dir=<path/to/your/wallet/directory> --dolphin --suggested-fee-recipient=<YOUR_WALLET_ADDRESS>

--suggested-fee-recipient will allow you to earn block priority fees. If no --suggested-fee-recipient is set neither on the validator client nor on the beacon node, the corresponding fees will be sent to the burn address, and forever lost.

More on Validator Activation

Activation Queue

Once your Execution Layer and Consensus Layer are synchronized and your deposit transaction successfully executed, your validator will enter the activation queue. This is a necessary step before becoming an active validator. Due to network protocols, the activation process can take 24 hours or more, depending on the queue. The system allows for only 900 new validators to join per day to maintain network stability and manage the rate of growth.

Activated

Upon activation, your validator will begin participating in the creation and validation of blocks. This active involvement in the network functions allows your validator to start earning staking rewards. These rewards are compensation for your contributions to network security and operability, incentivizing ongoing participation and support of the network's integrity.

Withdrawal Process (Quitting the validator status)

Initiating a Voluntary Exit

For users who decide to cease staking and wish to withdraw their entire balance, a "voluntary exit" process must be initiated. This involves signing and broadcasting a voluntary exit message using your validator keys. The process is facilitated by your validator client and must be submitted to your beacon node. Importantly, this action does not require any gas fees, as it is a part of the consensus layer's functionality. You will have to rely on the following-like command:

$ prysmctl validator exit --wallet-dir=<path/to/your/wallet/directory> --beacon-rpc-provider=<127.0.0.1:4000>

Alternatively, you can use Bazel to initiate a voluntary exit from the source as follows:

$ bazel run //cmd/prysmctl -- validator exit --wallet-dir=<path/to/your/wallet/directory> --beacon-rpc-provider=<127.0.0.1:4000> 

Exiting Process

The time it takes for a validator to exit from staking can vary significantly depending on the number of other validators undergoing the same process simultaneously. After this period, your validator will no longer be eligible for validator duties, including block creation and voting.

Post-Exit Status

Once the exit process is complete, the validator's account status changes in several key ways:

  • No Longer Active: The account will no longer perform any network duties as a validator.
  • Ineligibility for Rewards: The account ceases to earn staking rewards.
  • Removal of Stake: The staked OVER tokens are no longer considered "at stake."
  • Full Withdrawal: After some epochs, the staked OVER tokens will be withdrawn to the address set in the deposit data.
- + \ No newline at end of file diff --git a/operators/run-a-node.html b/operators/run-a-node.html index 47b4004..d1c25be 100644 --- a/operators/run-a-node.html +++ b/operators/run-a-node.html @@ -4,7 +4,7 @@ Run a Node | OverProtocol Docs - + @@ -15,7 +15,7 @@ You can follow the Setting up Validators to run a validator client.

Network Configurations

OverProtocol Testnet Configuration

tip

When working with OverProtocol, especially in a testnet environment, it's important to note that testnet configurations and details may change at any time. This variability is typical of test environments, which are often updated or reset to test new features and improvements in the blockchain protocol.

Dolphin Testnet

The Dolphin testnet operates with the goal of providing an environment identical to that of the mainnet. Additionally, this testnet serves the role of applying and testing updates before they are implemented on the mainnet.

KeyValue
NetworkOverProtocol Dolphin
RPC URLYOUR_RPC_URL
Chain ID541762
Currency symbolOVER
Block Explorer URLhttps://dolphin.view.over.network/
SweepEpoch648000 (about 90 days)

Node Types

OverProtocol supports several types of nodes, each serving distinct functions within the network:

  • Full Nodes: Primarily used for querying data and interacting with the blockchain, full nodes maintain only the Over Layer of the blockchain. Setting up a node with default configurations will typically result in a full node.
  • Archive Nodes: These nodes store the complete state of the blockchain from its genesis. Due to the extensive historical data they retain, archive nodes generally require significant disk space.
  • Validator Nodes: Essential for the security and integrity of the blockchain, validator nodes participate in proposing and voting on blocks. They play a critical role in maintaining the blockchain's consensus mechanism.

Each node type is integral to the network’s functionality, offering different capabilities and requiring varying levels of resource commitment. Depending on your participation goals and available resources, you can choose the node type that best fits your needs in supporting and engaging with the OverProtocol ecosystem.

Synchronization Modes

The synchronization process is critical for ensuring that a node in the OverProtocol network is up-to-date with the latest blockchain state. This process involves downloading data from peers, verifying its integrity, and building a local blockchain database. Given the separation of data into the execution layer and the consensus layer in OverProtocol, each layer employs distinct synchronization strategies to manage data effectively.

These synchronization modes provide different trade-offs between speed, disk usage, network bandwidth, and security. Choosing the right sync mode for a node depends on the node operator’s specific requirements, including their security preferences, hardware capabilities, and how quickly they need their node to be fully operational.

Execution Layer Sync Modes

In the execution layer, there are two primary synchronization modes to become a full node: Full Sync and Snap Sync. OverNode users can easily select the execution sync modes, upon blockchain data download.

Full Sync:

  • This mode involves downloading all blocks, including headers, transactions, and receipts, from the genesis block onward.
  • It generates the state of the blockchain incrementally by executing every transaction.
  • This method minimizes trust as it verifies every transaction independently, providing the highest level of security.
  • Due to the comprehensive nature of the data processing involved, this sync can take days, depending on the number of transactions in the blockchain’s history.
$ geth --syncmode full

Snap Sync:

  • This mode starts from a more recent "trusted" checkpoint rather than the genesis block.
  • This mode leverages periodic snapshots of the blockchain state, allowing the node to regenerate necessary state data on demand rather than maintaining a complete historical state database.
  • It is the fastest synchronization strategy and is the default setting on networks.
  • This mode significantly reduces disk usage and network bandwidth requirements.
$ geth --syncmode snap

Becoming an Archive Node:

There is an option to become an archive node. Currently, there is no option for OverNode users to become an archive node. Client software runners could become an archive node by running the execution client with the following tag:

$ geth --gcmode archive

If the combination is geth --syncmode full --gcmode archive, then all blockchain data from the genesis block is written to the database. If the combination is geth --syncmode snap --gcmode archive, then blockchain data is stored from the trusted checkpoint onward.

Choose the Number of Epochs to Store:

By default the OverProtocol client runs with minimum storage usage. You can change the epochLimit flag to change how many checkpoints to store.

$ geth --epochLimit X

This stores checkpoints states until X epoch to the past (default is set to minimum, which is 2).

$ geth --epochLimit 0

This stores all inactive data in addition to active and staged data.

Normally the default setting is enough, but saving previous checkpoints can be useful when you want to retrieve states from previous epochs.

Consensus Layer Sync Modes

There are two ways to sync the consensus layer: initial sync and checkpoint sync. OverNode users can only sync the consensus layer through checkpoint sync, as it is set by default. Chronos client software runners can choose between the two sync modes.

Initial sync:

  • This sync mode downloads the beacon chain data, if it has lower head information than its peers.
  • When bootstrapping a node, it has no beacon chain data, so it downloads all the beacon chain data starting from the genesis.

Checkpoint sync (Initial sync from checkpoint):

  • This mode enhances the user experience by allowing consensus layer to sync from a recent weak subjectivity checkpoint instead of from the genesis block.
  • This approach drastically reduces the initial sync time.
  • The source of the checkpoint data is crucial and should be chosen with care, as the node will inherently trust the third party providing the data.
  • Append the following tags to enable the checkpoint sync
$ beacon-chain --checkpoint-sync-url value

What's Next

Once your node is up, running and synced, the next step is to register and operate validators. This involves configuring your node to participate in the consensus process, enhancing the network's security and stability.

For OverNode users this step is pretty much straight-forward. After the node is synced, jump in to the Staking tab to register as a validator.

For advanced users running the client software from scratch follow register and operate validators section.

- + \ No newline at end of file diff --git a/operators/system-requirements.html b/operators/system-requirements.html index 2b7ea71..7c4ac36 100644 --- a/operators/system-requirements.html +++ b/operators/system-requirements.html @@ -4,13 +4,13 @@ System Requirements | OverProtocol Docs - +

System Requirements

Environment

When deciding how to deploy an OverProtocol node, you are presented with two primary options: running it on a local physical machine or using a cloud server. Each approach has distinct benefits and drawbacks, influenced by factors such as your technical expertise, budget, and priorities for privacy and control.

OverProtocol recommends using a local machine to participate in the network. A censorship-resistant, decentralized network ideally should not depend on cloud providers. Operating your node on your own local hardware aligns with the decentralized ethos of blockchain technology.

OverProtocol's node client can operate on various types of hardware, including personal computers, laptops, dedicated servers. While it's feasible to run the node client on your regular personal computer, using a dedicated machine specifically for your node can significantly boost its performance and enhance security.

Requirements

Although OverProtocol does not demand extensive computing power, maintaining a continuous connection to the blockchain network is essential. The system involves frequent data transfers and continuous reading and writing operations. Therefore, using higher-quality hardware and ensuring a stable internet connection can significantly enhance the performance of the client.

Minimum Hardware Requirements

To participate as a node in OverProtocol, the minimum hardware requirements are relatively modest. However, participating in a blockchain system is input/output intensive. Therefore, securing a solid-state drive (SSD) is crucial for efficient data handling and quick access to blockchain records.

  • CPU: Dual-core or higher
  • Memory: 8GB RAM
  • Storage: SSD with at least 50GB available space
  • Network: 8MBit/sec download speed

For an optimal experience and enhanced performance, particularly for validators who require more from their systems, the following specifications are recommended:

  • CPU: Fast CPU with 4 or more cores
  • Memory: 16GB RAM or more
  • Storage: SSD with at least 128GB available space
  • Network: 25+ MBit/s bandwidth

By meeting or exceeding these recommended specifications, you can ensure that your node operates efficiently, contributes positively to the network, and minimizes the risk of downtime or performance issues.

- + \ No newline at end of file