diff --git a/.github/workflows/check_string.yml b/.github/workflows/check_string.yml new file mode 100644 index 0000000..7e78066 --- /dev/null +++ b/.github/workflows/check_string.yml @@ -0,0 +1,21 @@ +name: Check for String + +on: [push, pull_request] + +jobs: + check_string: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Check for Deepl string + run: | + if grep -R "Deepl" . ; then + echo "String 'Deepl' found" + exit 1 + else + echo "String 'Deepl' not found" + fi + diff --git a/README.md b/README.md index d37db53..ce33a64 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -## Caveats of Hugo +## Caveats of HUGO - When you have an article under content that is in form of markdown page and it has a "date" value set in the future it will NOT render out. @@ -15,4 +15,20 @@ bundling code is visible. Add new files to the bundle there ## Template This site uses the Candy Flat Theme. -(Internal: It is in the company Shared Drive.) \ No newline at end of file +(Internal: It is in the company Shared Drive.) + +## Multi-Language +HUGO offers two forms of multi-language support: +- Filename extension based +- Subfolder based +See detailed documentation here: https://gohugo.io/content-management/multilingual/ + +We decided to implement the filename extension route, because it is quicker to determine whether an article +has already been translated: you just check whether a same-named copy sits next to it in the filesystem tree. +Also the language coding for layouts still works through filename extensions and it is more coherent to have +content AND layouts use the same mechanism. + +When localizing a template in the `layouts` folder there are two options: +- Creating a new template with the `.de.md` extension. (Beware, you must restart the `hugo server` for it to show. It does not support hot reload) +- Using the `i18n` feature of *HUGO*. All strings are in the *i18n* folder either in the *en.yaml* or *de.yaml*. + + See documentation here: https://gohugo.io/content-management/multilingual/#translation-of-strings \ No newline at end of file diff --git a/assets/css/layout.css b/assets/css/layout.css index 6a0320e..d8bfc79 100644 --- a/assets/css/layout.css +++ b/assets/css/layout.css @@ -24,12 +24,12 @@ ================================================== */ /* Smaller than standard 960 (devices and browsers) */ - @media only screen and (max-width: 1150px) { + @media only screen and (max-width: 1250px) { .header-container {padding: 0 30px;} } - @media only screen and (max-width: 959px) { + @media only screen and (max-width: 1050px) { #ancla1,#ancla2,#ancla3,#ancla4,#ancla5,#ancla6{position: absolute; top: -100px;} .menu-select{display: none;} .profile1{font-size: 22px !important;} diff --git a/config.toml b/config.toml index 8e17bf9..c8bacf8 100644 --- a/config.toml +++ b/config.toml @@ -1,12 +1,31 @@ baseURL = 'https://www.green-coding.io/' -languageCode = 'de-en' title = 'green-coding.io' +defaultContentLanguageInSubdir = false + paginate = 10 paginatePath = "page" summaryLength = 30 +defaultContentLanguage = "en" +[languages] + [languages.en] + disabled = false + languageCode = "de-EN" + languageDirection = 'ltr' + languageName = 'English' + weight = 1 + + [languages.de] + languageCode = "de-DE" + disabled = false + languageDirection = 'ltr' + languageName = 'Deutsch' + weight = 2 + + + # Minify removes also the "" (quotes) from the HTML tags. This breaks bing site validation and # also maybe some SEO features. 
[minify] diff --git a/content/_index.md b/content/_index.md deleted file mode 100644 index 1f578fc..0000000 --- a/content/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: "Green Coding Berlin - Sustainable Software Engineering" -draft: false -description: "We are helping developers and companies understand and optimize the carbon emissions of their software and digital infrastructure" -date: 2022-07-21 08:00:00 -# This page has no content and everything is found in layouts/home.html as it contains range iterators --- diff --git a/content/about.de.md b/content/about.de.md new file mode 100644 index 0000000..4339434 --- /dev/null +++ b/content/about.de.md @@ -0,0 +1,171 @@ +--- +title: "Nachhaltige Software" +draft: false +description: "Finden Sie heraus, was wir bei Green Coding tun und warum die Auswirkungen von Software auf die Umwelt wichtig sind" +date: 2022-07-21 08:00:00 +author: "Arne Tarara" +authorlink: "https://de.linkedin.com/in/arne-tarara" +translationKey: about + +--- + +Software wird immer CO2-Emissionen haben. Genauso wie jede Technologie, selbst die Solarenergie, CO2 in der Herstellung produziert und das menschliche Leben auch. + +Wir glauben jedoch, dass die Nutzung von Software der Weg zu mehr Umweltfreundlichkeit und Nachhaltigkeit ist und dass die Digitalisierung jedes Sektors ein Baustein zur Bekämpfung des Klimawandels ist. + +Wie in jedem technologischen Bereich ist das Messen und Verstehen des eigenen Verbrauchs der wichtigste Schritt, um eine Aufgabe anzugehen. + +Der nächste Schritt besteht darin, die Optimierungspotenziale zu verstehen. Manchmal sind dies sehr einfache Sachen wie [grünes Hosting](https://www.thegreenwebfoundation.org/). + +In anderen Fällen geht es um Änderungen an Ihrer Infrastruktur, Ihrem Code oder Ihren Entwicklungsabläufen. + +Der wichtigste Schritt ist die Sensibilisierung und Aufklärung der Entwickler über den Energieverbrauch als einen wichtigen Baustein, der bei der Entwicklung von Software-Architekturen zu berücksichtigen ist. + +Dies wird wiederum die Nachfrage nach den Metriken erhöhen und die Bereitstellung durch die Akteure der Branche vorantreiben. + + +{{< greenblock >}} +Software-Nutzung +{{}} + +Um Berechnungen durchführen zu können, muss die Software die zugrunde liegende Hardware nutzen und verbraucht daher Energie. + +Der Energieverbrauch ist oft nicht einfach zu ermitteln. Wenn Sie CPU-/GPU-intensive Lasten haben, wie beim [High Performance Computing](https://de.wikipedia.org/wiki/Hochleistungsrechnen) +oder beim Bitcoin-Mining, ist der Energieverbrauch oft identisch mit der thermischen Leistung Ihres Chips. + +Bei der alltäglichen Nutzung von Software ist der Fall jedoch ganz anders. Viele Faktoren tragen zu dem oft verschwenderischen Energieverbrauch von Software bei und sind oft von der reinen Zeit, in der eine Software läuft, entkoppelt: + +#### Boot-Zeit + +Moderne Software läuft virtualisiert oder containerisiert. Diese virtuellen Maschinen müssen gebootet werden. +Die Kosten für dieses ständige Hoch- und Runterfahren sind für den Benutzer und oft auch für den Entwickler nicht sichtbar. + +#### Infrastruktur + +Dieser Punkt ist mit der Boot-Zeit verwoben, hat aber genug Potenzial, um gesondert erwähnt zu werden. +Moderne Software wird zumindest in einem Hypervisor virtualisiert, wenn nicht sogar in einer VM oder einem zusätzlichen Container. Diese Technik löst viele Probleme in Bezug auf die Verschwendung von Ressourcen, bringt aber auch Herausforderungen wie Overhead mit sich. 
+Die Infrastruktur ist oft suboptimal gewählt, überdimensioniert oder zu undurchsichtig, um sie zu analysieren. + +#### Hintergrundaktivitäten + +Nach der Benutzerinteraktion muss die Software Hintergrundaufgaben erledigen. Sei es Cronjobs, ML-Training, Stream-Verarbeitung usw. + +#### Leerlaufzeit + +Auch im Zeitalter der Virtualisierung haben Maschinen oft erhebliche Leerlaufzeiten. In der Benutzerlandschaft wird dies oft mit Schlafmodi kompensiert. In der Server-Welt gibt es typischerweise keine Energiesparmodi. + +#### Prozessenergie + +Software kann so schnell sein, dass es nicht wahrnehmbar ist, ob die Nutzung 10ms oder 100ms betrug. Dennoch kann der Energieunterschied enorm sein und ist schwer abzuschätzen, wenn er hochskaliert wird. + + +### Unsere Arbeit + +Um all diese getrennten Bereiche für die EntwicklerInnen sichtbar zu machen, entwickeln wir Open-Source-Tools, die den Energieverbrauch sichtbar machen. + +Für nutzerorientierte Anwendungen in der Desktop-, Web- und mobilen Welt entwickeln wir ein Tool zur Messung des gesamten Anwendungslebenszyklus auf der Grundlage eines Standardnutzungsszenarios: Das [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool) + +Durch die Verknüpfung der Version des Codes mit einem Nutzungsszenario können wir verschiedene Software miteinander vergleichbar machen. + +In Cloud-Umgebungen mit verteilten Architekturen ist die Verwendung eines Benchmarking-Tools sehr umständlich und nicht wirklich sinnvoll. + +Hier werden Inline-Messungen benötigt, die in bereits existierende Beobachtungslösungen exportiert werden können, und auch vorausschauende Schätzungen, die architektonische Optimierungen vorantreiben können (z. B. welcher Dienst zu verwenden ist oder ob ein Wechsel zu Serverless von Vorteil ist). + +Anhand von Fallstudien wollen wir auch die Overhead-Kosten der Virtualisierung aufzeigen und Metriken bereitstellen, um fundierte Entscheidungen zu treffen, welches Architekturmodell am besten in eine energiebewusste Unternehmenskultur passt. + +Sehen Sie sich auch [unsere Projekte](/de/#projects) an. + +{{< whiteblock >}} +Entwicklung von Software +{{}} + +Bei der Entwicklung von Software werden auch VMs ständig hoch- und runtergefahren. Darüber hinaus durchläuft die Software in einem typischen Entwicklungsprozess eine Continuous Integration (CI) Pipeline, in der alle Tests vollständig ausgeführt werden. + +Dieser Prozess ist oft sehr intransparent und die wahren Kosten bleiben dem Entwickler verborgen, da er in der Cloud oder auf +spezialisierten SAAS-Plattformen stattfindet. + +Das Verständnis der Kosten für die Erstellung von Software und die potenziellen Einsparungen bei der Stapelverarbeitung oder sogar der Aufteilung der Pipeline, um nur relevante Teile auszuführen, kann enorm sein. + +### Unsere Arbeit + +Messen ist der erste Schlüssel zum Verstehen, daher liegt unser Hauptaugenmerk hier auf der Sichtbarmachung und Aufklärung über die Energiekosten bei der Entwicklung von Software. + +Wir entwickeln Inline-Plugins für Github Actions und Badges, die andere Leute darauf aufmerksam machen, wie viel die Entwicklung eines Tools kostet. + +Außerdem konzentrieren wir uns auf den Aufbau von statischen Websites, die in der Regel weniger Ressourcen verbrauchen, aber auch gebaut werden müssen. + +Schauen Sie sich unser [OpenEnergyBadge Projekt](/de/projects/open-energy-badge), unsere [Eco CI-plugins for Github](/de/projects/eco-ci) oder unsere [Case Studies](/case-studies/) zu diesem Thema an. 
+ +Beispiel Abzeichen: {{< rawhtml >}} {{< /rawhtml >}} + + +{{< greenblock >}} +Netzwerke +{{}} + +Netzübertragungen rücken immer mehr in den Fokus als einer der +Hauptverursacher der CO2-Emissionen von Software. + +Großartige Tools wie [Websitecarbon.com](https://www.websitecarbon.com/) ermöglichen die Sichtbarkeit der Kosten für eine typische +Website-Anfrage. + +Heutzutage wird dies in der Regel durch das Herunterbrechen der Komplexität in eine sehr fehleranfällige Formel erreicht. Siehe unseren +Artikel über [CO2-Formeln](/co2-formulas/) für weitere Einzelheiten. + +Die Problematik der Netzwerkemissionen liegt nicht in ihrer Existenz, sondern in ihrer Unsichtbarkeit. + +In der Anfangszeit des Internets wurde die Netzübertragung sehr genau gemessen, da danach abgerechnet wurde. Durch die Einführung von Flatrates +konnte das Internet seine Akzeptanz erhöhen und wurde allgegenwärtig. + +Flatrates haben jedoch zu einer Abkopplung vom Netzwerkverkehr und den Emissionen geführt. +Dies führt zu einer Entkopplung und dem Missverständnis, dass jede Nutzung des Netzes irgendwann +zu einem Anstieg der Kohlenstoffemissionen führt. Manchmal linear, manchmal in Stufen. (Siehe [Gigabytes zu kWh](/de/co2-formulas/#gigabytes-to-kwh)) + +Da Sie Ihren Verbrauch nicht sehen, wie zum Beispiel bei Ihrer Telefonrechnung, gehen Sie das Risiko ein, die Ressource nicht mehr zu beachten. Das Gleiche gilt für das Essen am Buffet, wo Reste die Regel sind. + +Die Lösung liegt unserer Meinung nach darin, diese Ressource sichtbarer zu machen und einen nachhaltigeren Umgang mit ihr zu finden. + + +### Unsere Arbeit +Wir befassen uns derzeit mit diesem Thema, indem wir Entwickler durch [Meetups](/meetups-and-events/) und durch +Vorträge auf Konferenzen oder Coding Bootcamps wie [WBS Coding School](https://www.wbscodingschool.com/) erreichen. + +Auf der technischen Seite heben wir die Netzwerkemissionen in unserem [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool) hervor und zeigen Empfehlungen wie: + +- Verlagerung von Mobilfunk- zu Festnetzanschlüssen +- Demand Shifting, um Netzwerkanfragen zu CO2-freundlichen Zeiten zu erledigen +- Verwendung von Kompressionstechniken +- Änderung von Verbindungsformaten wie HTTP -> HTTP2 +- NICHT liefern von Inhalten, die keinen großen Nutzen haben +- Verringerung der Aktualisierungszyklen und Abfrageintervalle + +{{< whiteblock >}} +Gebundenes CO2 +{{}} + +Um Software zu betreiben, braucht man Hardware. Diese Hardware muss hergestellt werden und ist oft der Hauptverursacher von CO2-Emissionen. + +Bei Verbrauchergeräten wie Smartphones stammen typischerweise [über 90 % der gesamten CO2-Emissionen aus der Herstellung](https://greensoftware.foundation/articles/sustainable-systems-user-hardware-and-sustainability) + +Bei Servern sieht die Sache anders aus, und hier ist es [ungefähr umgekehrt](https://i.dell.com/sites/csdocuments/CorpComm_Docs/en/carbon-footprint-poweredge-r740xd.pdf) (wenn sie mit grünem Strom betrieben werden). + + +### Unsere Arbeit + +Wir verwenden offizielle Datenbanken wie [NegaOctet](https://negaoctet.org/), eine Datenbank aus Frankreich, um das gebundene CO2 der zugrunde liegenden Hardware zu ermitteln. + +Ein weiterer Ansatz, den wir verfolgen, ist das Konzept der *Digital Resource Primitives*, das von der [SDIA](https://knowledge.sdialliance.org/digital-environmental-footprint) entwickelt wurde. 
+ +Das Konzept kennzeichnet eine Ressource als blockiert, wenn sie von einer Software verwendet wird, und kann so die CO2 auswirkungen der Herstellung der Zeit zuordnen, in der sie verwendet wird, was die Software besser vergleichbar macht. + +{{< greenblock >}} +Interessiert? +{{}} + +{{< rawhtml >}} +
+ Kontaktieren Sie uns +
+
+
Weitere Beispiele finden Sie auf unserer Seite Services.
+{{}} diff --git a/content/about.md b/content/about.md index 1d1240b..aede770 100644 --- a/content/about.md +++ b/content/about.md @@ -5,6 +5,7 @@ description: "Find out what we do at Green Coding and why the carbon impact of s date: 2022-07-21 08:00:00 author: "Arne Tarara" authorlink: "https://de.linkedin.com/in/arne-tarara" +translationKey: about --- @@ -13,8 +14,8 @@ and human life does too. However we believe that using software is the way to be more green and more sustainable and digitalizing every sector is one building block to tackle climate change. -As with every technological domain measuring and understanding how much you use -is the most important step to approach any task. +As with every technological domain measuring and understanding how much you use +is the most important step to approach any task. The next step is to understand the potentials for optimizations. Sometimes this are very low hangig fruits like [green hosting](https://www.thegreenwebfoundation.org/). @@ -53,7 +54,7 @@ After the user interaction software has to do background jobs. Be it cronjobs, M Even in the age of virtualization machines often have significant idle times. In the user landscape this is often compensated with sleep modes. In the server world energy saving modes typically do not exist. #### Process energy -Software can so fast that it is not perceivable if the usage was 10ms or 100ms. Nevertheless the energy difference can be enormous and +Software can so fast that it is not perceivable if the usage was 10ms or 100ms. Nevertheless the energy difference can be enormous and is hard to estimate if scaled up. @@ -86,18 +87,18 @@ software runs through a Continuous Integration (CI) Pipeline and all the tests a This process is often very opaque and the true cost hidden from the developer as it takes place in the cloud or on specialized SAAS platforms. -The understanding the cost of building software and the potential savings when batching the process or even splitting +The understanding the cost of building software and the potential savings when batching the process or even splitting the Pipeline to only execute relevant parts can be tremendous. ### Our work -As said before: Measuring is the first key to understanding, therefore our main focus here lies in +As said before: Measuring is the first key to understanding, therefore our main focus here lies in the visiblity and education about the energy cost for developing software. We develop in-line plugins for Github Actions and badges that make other people aware how much the build costs. Also we focus in the building process for static sites, which typically have a lower cost of operating but incur a build cost. -Checkout out our [OpenEnergyBadge project](/projects/open-energy-badge), our [Eco CI-plugins for Github](/projects/eco-ci) or our [Case Studies](/case-studies/) on the topic. +Checkout out our [OpenEnergyBadge project](/projects/open-energy-badge), our [Eco CI-plugins for Github](/projects/eco-ci) or our [Case Studies](/case-studies/) on the topic. Example badge: {{< rawhtml >}} {{< /rawhtml >}} @@ -106,7 +107,7 @@ Network {{}} -Network transmissions are coming more and more into the focus as one of +Network transmissions are coming more and more into the focus as one of the main drivers of software carbon emissions. Great tools like [Websitecarbon.com](https://www.websitecarbon.com/) allow the visiblity of the cost for a typical @@ -121,7 +122,7 @@ In the advent of the internet network transfer was typically metered. 
By introducing flatrates the internet increased its adoption and became ubiquitous. However flatrates have introduced a decoupling from the nature of network transmissions not really -having a flat carbon emission. +having a flat carbon emission. This creates a disconnectedness and misunderstanding that every use of network will at some point lead to an increase in carbon emissions. Sometimes linearly, sometimes in stages. (See [Gigabytes to kWh](/co2-formulas/#gigabytes-to-kwh)) @@ -142,7 +143,7 @@ recommendations like - Using compression techniques - Altering connection formats like HTTP -> HTTP2 - NOT delivering content that has no strong benefit -- Reducing update cycles and polling intervals +- Reducing update cycles and polling intervals {{< whiteblock >}} Embodied carbon diff --git a/content/blog/DRAFT-the-quest-for-VM-energy-attribution.md b/content/blog/DRAFT-the-quest-for-VM-energy-attribution.md new file mode 100644 index 0000000..4d1a041 --- /dev/null +++ b/content/blog/DRAFT-the-quest-for-VM-energy-attribution.md @@ -0,0 +1,41 @@ +--- +title: "The quest for VM energy attribution" +draft: true +date: 2023-03-02 +author: "Arne Tarara" +authorlink: "https://www.linkedin.com/in/arne-tarara" +--- + +Nowadays most of the compute happens in virtualized systems. Cloud vendors have machines with many many cores and as a user you get the share that you are paying for. + +In our research about energy consumption of software we often use bare-metal systems where the whole host is used to benchmark a piece of software. See for instance our [Cluster]() with examples for the machines that we use. + +We can answer many questions about software energy consumption with these systems, such as how the software performs over time, how strongly it is affected by bloat, how the energy profile compares to a different branch of the software and how much is saved when a certain optimization is done. + +However very often we get the question: "How will these values differ if I run the software in my cloud?" + +Since cloud systems are virtualized and heavily restricted, four main problems have to be considered here: +- Cloud vendors do not allow access to in-chip energy estimation systems like RAPL +- Cloud vendors typically do not open access to power metering facilities of the BMC like IPMI or similar +- Cloud vendors typically do not have, or do not open, access to external power-meters +- Since you are running in a VM, even if all of the other three questions could be solved, how would you even attribute the energy? + +In our [last post] we have looked at energy attribution mechanisms that happen on the process / container level, which however are really only designed to work on bare-metal systems. +Especially since systems like [RAPL] are only available to root users on most modern operating systems. + +The easy answer would be: Well we just count *the work* that has been done by each process and then we split the total energy of the system according to the *share of work*. +An example of a technical implementation of this could be: if you count the CPU instructions per process and for the whole system you could make an attribution. + +The same could technically work for a VM! However it would be very complex to cover all the edge cases here and the overhead also has to be taken into consideration. +Also, to allow this, some forwarding mechanism has to be safely implemented in the VM. + +All solvable problems though, but another question arises when thinking about this: Does it even make sense? 
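To make the share-of-work idea above concrete, here is a minimal sketch. It assumes a bare-metal Linux host where the RAPL powercap counter is readable as root (exactly the access that cloud vendors typically block) and it uses CPU time from `/proc` as a stand-in for "work"; the instruction-counting variant mentioned above would need `perf` instead. The PID is a placeholder.

```python
# Minimal sketch of share-of-work energy attribution, for illustration only.
# Assumes bare-metal Linux with root access to the RAPL powercap counter and
# uses CPU time as the "work" metric (an instruction-based variant would use perf).
import time

RAPL = "/sys/class/powercap/intel-rapl:0/energy_uj"  # package energy counter in microjoules

def energy_uj():
    return int(open(RAPL).read())

def system_busy_jiffies():
    vals = list(map(int, open("/proc/stat").readline().split()[1:]))
    return sum(vals) - vals[3] - vals[4]             # drop idle and iowait

def process_jiffies(pid):
    f = open(f"/proc/{pid}/stat").read().split()     # naive parse; breaks if the process name contains spaces
    return int(f[13]) + int(f[14])                   # utime + stime

def attribute(pid, window=10):
    e0, sys0, proc0 = energy_uj(), system_busy_jiffies(), process_jiffies(pid)
    time.sleep(window)
    joules = (energy_uj() - e0) / 1e6                # ignores counter wrap-around
    share = (process_jiffies(pid) - proc0) / max(system_busy_jiffies() - sys0, 1)
    return share, share * joules

if __name__ == "__main__":
    share, joules = attribute(pid=1234)              # 1234 is a placeholder PID
    print(f"~{share:.1%} of the busy CPU time, ~{joules:.2f} J attributed to this process")
```

Even this toy version hints at the catch discussed next: the joules being divided up depend on what everything else on the machine is doing at the same time.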
+ The question originates from the fact that many server machines out there are non-linear in their energy consumption per ratio of throughput (see [SPECpower](https://www.spec.org/power_ssj2008/results/res2023q3/power_ssj2008-20230619-01282.html)). + So this would mean that the exact same piece of code on a system would have a very different energy profile if the total machine is loaded or unloaded. So drastically even that it might incur a 50% change or more. + Technically the same situation appears also if you have a software architecture with three components that utilize a system to 50% and now you introduce extra functionality, add a fourth component and now *magically* the partial energy per throughput profile of the other three components increases or decreases. + So the question is: Does it even make any sense at all to look at pieces of a software (processes) or at VMs? + diff --git a/content/blog/adventures-in-dc-measurement.md b/content/blog/adventures-in-dc-measurement.md index 42bac05..4376e3e 100644 --- a/content/blog/adventures-in-dc-measurement.md +++ b/content/blog/adventures-in-dc-measurement.md @@ -9,7 +9,7 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" We finally got our DC measurement setup ready ... sadly around one month later than expected. -If you wanna see first results go here: +If you wanna see first results go here: - [Wordpress sample measurement](https://metrics.green-coding.io/stats.html?id=2213b2ec-f96f-4fb4-9fc9-82094bdee1bb) - [Stress sample measurement](https://metrics.green-coding.io/stats.html?id=f99e563d-2c5c-453d-99fe-5ac9f6f307ac) @@ -17,14 +17,14 @@ If you wann get the details of our journey, why we are integrating it into our t ## Why? -Current energy measurement in software that works in a small time resolution is mostly done in with -[RAPL](https://01.org/blogs/2014/running-average-power-limit-%E2%80%93-rapl) interfaces. +Current energy measurement in software that works in a small time resolution is mostly done with +[RAPL](https://01.org/blogs/2014/running-average-power-limit-%E2%80%93-rapl) interfaces. Typically either [directly](https://web.eece.maine.edu/~vweaver/projects/rapl/) or through tools like [Scaphandre](https://github.com/hubblo-org/scaphandre). -If you want to have more direct DC power readings you can use the [Marcher system from the Texas State University](https://greensoft.cs.txstate.edu/index.php/2018/05/22/marcher-the-need-for-a-fine-grained-software-energy-efficiency-measuring-platform/) with their [Green Code](https://greencode.cs.txstate.edu/) hosted service. +If you want to have more direct DC power readings you can use the [Marcher system from the Texas State University](https://greensoft.cs.txstate.edu/index.php/2018/05/22/marcher-the-need-for-a-fine-grained-software-energy-efficiency-measuring-platform/) with their [Green Code](https://greencode.cs.txstate.edu/) hosted service. -While this last option works generally fine especially with C code, we had some problem to get more complex PHP and Python code running. +While this last option generally works fine, especially with C code, we had some problems getting more complex PHP and Python code running. You can however apply for an SSH access to get more complex code working, we did not try though. Also the jQuery interface is partially broken and getting a detailed power consumption is not possible anymore. 
@@ -112,7 +112,7 @@ Since we are experiencing different voltages on the different 12 V lanes we susp The next iteration will use special current measurement resistors and also look into reducing the measurement noise a bit by evaluating a single-ended voltage measurement where we feed the analogue ground back to the PicoLog HDR ADC-24. -Another issue we are currently having is that the timestamps of the measurement results are not synchronized with the clock of the reference system. In the current setup we have the software for the PicoLog running not on the System under Test (SUT) but on a separate machine. +Another issue we are currently having is that the timestamps of the measurement results are not synchronized with the clock of the reference system. In the current setup we have the software for the PicoLog running not on the System under Test (SUT) but on a separate machine. The reason for that is that we initially configured our measurement setup to measure a windows machine, where our tools are not running atm. Since we are now measuring Linux only there is no reason anymore for it. @@ -121,11 +121,11 @@ We will update the measurement setup as well as the measurements on our [Metrics ## Whats next -As soon as we have updated the measurement setup and also falsified with the [Eco KDE Team]() and their DC measurements we will build the linear model out of it and create calibration curves for our other 4 machines +As soon as we have updated the measurement setup and also falsified with the [Eco KDE Team]() and their DC measurements we will build the linear model out of it and create calibration curves for our other 4 machines we have sitting around. Also we have ordered two Dell Power Edge Server blades to falsify our measurements with the official readings from the [Spec Power Database](https://spec.org/power_ssj2008/results/) -We hope to make the model robust enough so we can translate it to server environements in a similar +We hope to make the model robust enough so we can translate it to server environements in a similar fashion that is currently done for instance by [David Mytton](https://github.com/cloud-carbon-footprint/cloud-carbon-coefficients) and the [Cloud Carbon Footprint](https://www.cloudcarbonfootprint.org/docs/methodology/) project. Only with a better time resolution and more accurate data. \ No newline at end of file diff --git a/content/blog/carbon-aware-development.md b/content/blog/carbon-aware-development.md index ba38922..4230ca6 100644 --- a/content/blog/carbon-aware-development.md +++ b/content/blog/carbon-aware-development.md @@ -8,7 +8,7 @@ authorlink: "https://www.linkedin.com/in/dietgerhoffmann/" While writing the [“Software Life Cycle Assessment done in the wild”](/blog/software-life-cycle-assessment/) article I needed to develop a little server that could do three simple tasks. Because the article became too long I decided to separate the methodology how to develop with environmental impact in mind into a dedicated part. Feel free to head over to the [SLCA](/blog/software-life-cycle-assessment/) article and read the first part to see why we are developing this server. -In this article I want to introduce the new concept of Carbon Aware Development. 
While there is some prior work on how to measure carbon emissions of software, we at [Green Coding Berlin have loads on the topic](https://www.green-coding.io/blog/), there is no usable and tool based framework, I am aware of, that encourages developers to think about their choices in regards to carbon from the beginning (However there are theoretical ones like for instance [the GREENSOFT model](https://www.umwelt-campus.de/fileadmin/Umwelt-Campus/Greensoft/The_GREENSOFT_Model_A_reference_model_fo.pdf)). The main focus, in this article, is on the **development** phase of the Software Life Cycle Assessment with solutions for the usage phase discussed in the other article. Developing software is an iterative process. Often decisions we take very early on have major consequences later on. The most obvious being the programming language and the underlying architecture everything is based on. While many factors are taken into account early on, carbon is nearly never one. +In this article I want to introduce the new concept of Carbon Aware Development. While there is some prior work on how to measure carbon emissions of software, we at [Green Coding Solutions have loads on the topic](https://www.green-coding.io/blog/), there is no usable and tool based framework, I am aware of, that encourages developers to think about their choices in regards to carbon from the beginning (However there are theoretical ones like for instance [the GREENSOFT model](https://www.umwelt-campus.de/fileadmin/Umwelt-Campus/Greensoft/The_GREENSOFT_Model_A_reference_model_fo.pdf)). The main focus, in this article, is on the **development** phase of the Software Life Cycle Assessment with solutions for the usage phase discussed in the other article. Developing software is an iterative process. Often decisions we take very early on have major consequences later on. The most obvious being the programming language and the underlying architecture everything is based on. While many factors are taken into account early on, carbon is nearly never one. > **Carbon Aware Development** is a methodology in which resource usage is seen as a first class metric in evaluating software and implementation decisions. It acknowledges that software is always in ongoing development. > diff --git a/content/blog/cloud-energy-usage-data.md b/content/blog/cloud-energy-usage-data.md index 58575da..16e0640 100644 --- a/content/blog/cloud-energy-usage-data.md +++ b/content/blog/cloud-energy-usage-data.md @@ -10,12 +10,12 @@ Getting power usage data in cloud is a tough topic and one of the reasons projec Typically cloud vendors, especially hyperscalers, do neither supply energy consumption data of the whole machine from PDUs or similar, nor do they provide access to CPU internal energy data like for instance RAPL. -Especially for RAPL there are historical and security reasons. One of such, Platypus, we have looked into a bit more detail +Especially for RAPL there are historical and security reasons. One of such, Platypus, we have looked into a bit more detail with special regards to migitation mechanisms provided by Intel in our article [RAPL, SGX AND ENERGY FILTERING - INFLUENCES ON POWER CONSUMPTION -](https://www.green-coding.io/case-studies/rapl-and-sgx/). +](https://www.green-coding.io/case-studies/rapl-and-sgx/). -No cloud provider to our knowledge provides direct energy data of the machine. The only known thing are the -carbon reports by hyperscalers that show you the emissions for your services. 
+No cloud provider to our knowledge provides direct energy data of the machine. The only known thing are the +carbon reports by hyperscalers that show you the emissions for your services. Emissions however may vary strongly, as they have another layer of complexity on top of the electricyity: - PUE of the datacenter @@ -40,7 +40,7 @@ A new project by the Green Software Foundation is also focussing on making this ## Using estimation models -If you cannot measure you have to estimate. We have written a prior [blog article](https://www.green-coding.io/blog/specpower-model-with-xgboost-open-sourced/) about how this can be done with an +If you cannot measure you have to estimate. We have written a prior [blog article](https://www.green-coding.io/blog/specpower-model-with-xgboost-open-sourced/) about how this can be done with an easy machine learning model and provided an [extensive documentation on Github](https://github.com/green-coding-berlin/spec-power-model) how to discover the needed input parameters. However sometimes you need auxillary information like more details about the hypervisor and also which machine specific registers (MSRs) are accessible @@ -50,9 +50,9 @@ A project that tries to leverage information here is [MSR Cloud Tools](https://g ## How to find out what hardware your are using -We ordered it from "easiest" to "hardest". Altough writing an email might not really +We ordered it from "easiest" to "hardest". Altough writing an email might not really scale, it usually gives you the best information most of the time. -- First find out the hypervisor you are on. This usually helps with narrowing down later: +- First find out the hypervisor you are on. This usually helps with narrowing down later: + Through clocksource: `cat /sys/devices/system/clocksource/clocksource0/current_clocksource` or `cat /sys/bus/clocksource/devices/clocksource0/current_clocksource` + Or through 3rd party tool: `sudo apt-get install virt-what -y && sudo virt-what` - Reading the docs from your provider and "knowing" which model you are on @@ -71,7 +71,7 @@ scale, it usually gives you the best information most of the time. ## Checking MSRs directly -First you should check which hypervisor you are on to get a broad idea what to expect. Hyper-V, XEN etc. typically +First you should check which hypervisor you are on to get a broad idea what to expect. Hyper-V, XEN etc. typically block similar registers by default, so this should be the starting point. The easisest way to do this is to read from `dmidecode` or use the tool `virt-what`. @@ -106,7 +106,7 @@ do done ``` -In the following you find the compiled list of the hypervisors and the readable registers we found in Github Shared runners, +In the following you find the compiled list of the hypervisors and the readable registers we found in Github Shared runners, AWS and Hetzner. ## Github Shared Runner (Linux free) @@ -236,7 +236,7 @@ $ sudo dmidecode | grep -i -e manufacturer -e product -e vendor $ sudo virt-what xen xen-hvm - aws + aws ``` @@ -304,7 +304,7 @@ xen-hvm Reading RAPL register: ```$ sudo modprobe msr && sudo rdmsr -d 0x611 -0 +0 ``` This indicates RAPL is blocked. @@ -943,7 +943,7 @@ Overprovisioning means that the vCPU you are having is not actually assigned to This is very important as even if you have the possibility of deriving any energy metric it might be that you are mal- attributing due to over-provisioning. 
-An example would be: A 12 thread machine, which would normally be able to have 12 vCPUs hosts actually +An example would be: A 12 thread machine, which would normally be able to have 12 vCPUs hosts actually 24 clients. Every vCPU is here assigned to two clients in parallel. One way to check that for instance is to look at the **steal** time of the CPU. @@ -966,8 +966,8 @@ any values that you get in a VM in the cloud. ## White Hat alternative -What you technically can do though is to probe your cpu for features like first -starting with `cat /proc/cpuinfo` and looking only for true values. +What you technically can do though is to probe your cpu for features like first +starting with `cat /proc/cpuinfo` and looking only for true values. Then looking at the speed and flags and try to derive the model from there. @@ -976,7 +976,7 @@ Or you can do feature probing to run custom code and see how your processor beha But is this really worth the effort? Can't we just assume a "generic" CPU and try to take it from there? -To our knowledge no. Machines may vary drastically in energy consumption given +To our knowledge no. Machines may vary drastically in energy consumption given you change the underlying hardware. We are easily talking numbers that are 2x bigger or smaller JUST for the CPU. @@ -998,7 +998,7 @@ On heroku dynos the `/sys` and `/dev` filesystem is severley limited. MSRs are not forwarded and you cannot even tell which machine you are on. -Also cgroups information is not forwarded, so you cannot really tell how much CPU +Also cgroups information is not forwarded, so you cannot really tell how much CPU time you have been using. Only `/proc/stat` is available with the information for all CPUs. In our test it was 7 CPUs. @@ -1008,7 +1008,7 @@ as it is always installed with a random user. However if the user is always the same as during the install process it might just work .... Neverthelesse the MSRs are not accessible. -So the only hope really lies in the fact that during installtime of the buildpack we can +So the only hope really lies in the fact that during installtime of the buildpack we can read more stuff than we can when entering the dyno. This is to be done in a future post. diff --git a/content/blog/eco-ci-activity-checker-released.md b/content/blog/eco-ci-activity-checker-released.md index c582eee..c0efc6e 100644 --- a/content/blog/eco-ci-activity-checker-released.md +++ b/content/blog/eco-ci-activity-checker-released.md @@ -10,7 +10,7 @@ author: "Dan Mateas" {{< /infobox >}} -One question we've been tinkering around with here at Green Coding Berlin is how can we make CI pipelines around the world a little bit greener. +One question we've been tinkering around with here at Green Coding Solutions is how can we make CI pipelines around the world a little bit greener. We have noticed the constantly rising popularity of [Github Actions](https://github.com/features/actions) and its use in various CI jobs such as automated tests. We use it ourselves for this reason in [our open-source repository](https://github.com/green-coding-berlin/green-metrics-tool/actions). One nice feature of github actions is its marketplace where you can find and use publically published "Actions" in your workflow. 
@@ -22,7 +22,7 @@ An oft-utilized strategy for running CI pipelines, especially those that take a [👉 Eco CI Activity Checker in the Github Marketplace](https://github.com/marketplace/actions/eco-ci-activity-checker) -Here's an example of the Action in action: +Here's an example of the Action in action: ``` jobs: check_commits: @@ -58,7 +58,7 @@ Then later on in our workflow, we simply make the *main* job conditional on the with: ref: 'dev' submodules: 'true' - + - name: 'Setup, Run, and Teardown Tests' uses: ./.github/actions/gmt-pytest ``` diff --git a/content/blog/eco-ci-energy-estimation.md b/content/blog/eco-ci-energy-estimation.md index 4074378..ceb7d8e 100644 --- a/content/blog/eco-ci-energy-estimation.md +++ b/content/blog/eco-ci-energy-estimation.md @@ -56,7 +56,7 @@ Once you call the `get-measurement` task a second time, it will output the energ {{< /rawhtml >}} -If you call start-measurement again, it will reset cpu utilization file. This way you can get discrete energy estimations for different sections of your workflow file. +If you call start-measurement again, it will reset cpu utilization file. This way you can get discrete energy estimations for different sections of your workflow file. Something to be aware of - many workflows will have different jobs. Each job spins up a different virtual machine which might be on a different phyiscal machine. This means that you need to call our action, including the initialize task, for each job. diff --git a/content/blog/eco-ci-gitlab-release.md b/content/blog/eco-ci-gitlab-release.md index ca037c1..e8b0b29 100644 --- a/content/blog/eco-ci-gitlab-release.md +++ b/content/blog/eco-ci-gitlab-release.md @@ -81,7 +81,7 @@ We have improved the front end we provide on [our website](https://metrics.green {{< rawhtml >}} The individual CI Runs pages now support GitLab projects as well and has features which we outlined in our previous eco-ci article{{< /rawhtml >}}. ## Open Source Projects we're monitoring -As part of our goal to encourage green coding practices, and understand what features / wishes a developer working on real-world projects with a green coding mindset would want, we decided to fork some popular open source repositories and integrate the Eco-CI into their existing workflows. This gives us insight on how easy it is to use, what features are missing, what real-world edge cases don't work, and how valuable the information we provide actually is and what we can do to improve it. +As part of our goal to encourage green coding practices, and understand what features / wishes a developer working on real-world projects with a green coding mindset would want, we decided to fork some popular open source repositories and integrate the Eco-CI into their existing workflows. This gives us insight on how easy it is to use, what features are missing, what real-world edge cases don't work, and how valuable the information we provide actually is and what we can do to improve it. Currently we have forked and integrated three github projects: [Django](https://github.com/green-coding-berlin/django), [Flask](https://github.com/green-coding-berlin/flask), and [curl](https://github.com/green-coding-berlin/curl), as well as one Gitlab project, [OpenMW](https://gitlab.com/green-coding-berlin/eco-ci/openmw). Our plan is to pretend that we are sustainability engineers for these projects, keep them in sync, track their carbon footprint, and see what we can improve on with this information. 
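The workflow snippets above only show fragments of the two jobs. As a rough sketch of the overall pattern (the job names, the `should_run` output and the `git log` heuristic below are illustrative placeholders, not the actual Eco CI Activity Checker interface; the marketplace action linked above encapsulates the check step):

```yaml
on:
  schedule:
    - cron: '22 4 * * *'           # nightly scheduled run

jobs:
  check_commits:
    runs-on: ubuntu-latest
    outputs:
      should_run: ${{ steps.check.outputs.should_run }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0           # full history so git log can look back 24h
      - id: check
        run: |
          # placeholder heuristic: only run the main job if there were commits in the last 24h
          if [ -n "$(git log --since='24 hours ago' --oneline)" ]; then
            echo "should_run=true" >> "$GITHUB_OUTPUT"
          else
            echo "should_run=false" >> "$GITHUB_OUTPUT"
          fi

  tests:
    needs: check_commits
    if: needs.check_commits.outputs.should_run == 'true'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: 'Setup, Run, and Teardown Tests'
        uses: ./.github/actions/gmt-pytest
```

The part that matters is the `needs:`/`if:` wiring, which lets the expensive job be skipped entirely on days without new commits.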
diff --git a/content/blog/firefox-104-energy-measurements.md b/content/blog/firefox-104-energy-measurements.md index 274c87b..c53c988 100644 --- a/content/blog/firefox-104-energy-measurements.md +++ b/content/blog/firefox-104-energy-measurements.md @@ -8,7 +8,7 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" --- {{< infobox >}} - In an earlier version of the article the calculation statement contained a message about us + In an earlier version of the article the calculation statement contained a message about us being confused why the conversion factor is 277000000. Thanks to Silas Duddeck from the Goethe University in Frankfurt who pointed out that the factor should be 1J = 2,777778⋅10-7kWh = (1/3600000)kWh => 277777777 @@ -22,13 +22,13 @@ Mozilla released a new [version 104](https://www.mozilla.org/en-US/firefox/104.0 Firefox 104 power profiler in action {{< /rawhtml >}} -This immediately sparked our interest and we wanted to give it some test run against +This immediately sparked our interest and we wanted to give it some test run against a containerized browser in our [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool). According to the information from Mozilla this works only for M1 Macs or on Windows 11. -Since our Green Metrics Tool runs on Linux only we could not really make comparisons -on one machine, but we could give it a first look none the less and look into some +Since our Green Metrics Tool runs on Linux only we could not really make comparisons +on one machine, but we could give it a first look none the less and look into some details where the values are coming from. ## Research Questions @@ -51,7 +51,7 @@ Tab where you can inspect the whole performance analysis. Only the ones labeled *Process Power* are relevant for the Power output total. -Firefox actually splits the power per process, which is quite unusual for the RAPL readings we +Firefox actually splits the power per process, which is quite unusual for the RAPL readings we have come accross so far, but more on that later, when we look at the source code. The spikes for visiting the Umweltbundesamt.de homepage where at the typical spots @@ -62,16 +62,16 @@ and did not exceed **4 W** in our case. {{< /rawhtml >}} -In order to get an energy value we somehow have to sum up these values though with their +In order to get an energy value we somehow have to sum up these values though with their respective time measurement interval. Sampling time we defined was **10 ms**. -The profiler is able to export a **JSON Dump** if you click on *Upload local profile* in the +The profiler is able to export a **JSON Dump** if you click on *Upload local profile* in the top right corner and then *Download* it. A script to convert this we have attached in our Github repository. -~~Apparently we had to convert all readings in the JSON by the weird value of **277000000** and +~~Apparently we had to convert all readings in the JSON by the weird value of **277000000** and are unsure where this value is coming from .... but at least it makes the raw values from the JSON to be identical with the values we see in the profiler UI.~~ @@ -92,7 +92,7 @@ and then waiting 2 seconds did incur an energy cost of around **2mWh**. Although we also had two very unusal measurements of **~40 mWh** that we were not able to reproduce later the measurements seemed generally very stable. -We repeated 10 measurements in total. Since it was just a first look and measurements +We repeated 10 measurements in total. 
Since it was just a first look and measurements where started and stopped manually we opted for not reporting Std.Dev. etc, as the manual handling has probably more influence here anyway ... @@ -142,7 +142,7 @@ with open(args.filename) as a: ## Comparison on MacBook Pro 2015 -On our **MacBook Pro Mid-2015 with an Intel Core i7-5557U @ 3.1 GHz** running Ubuntu 22.04 we executed a +On our **MacBook Pro Mid-2015 with an Intel Core i7-5557U @ 3.1 GHz** running Ubuntu 22.04 we executed a measurement with our container based setup with the Green Metrics Tool. You can find [one sample measurement here](https://metrics.green-coding.io/stats.html?id=1102d24e-da8e-444e-ae60-a0c7b1694d90) @@ -152,13 +152,13 @@ The component energy of CPU and DRAM, which is probably the most comparable, her It is important to note that this is not the energy per process, but the energy for the whole system. -Measurements between machines are anyway not comparable that easily, as they will have +Measurements between machines are anyway not comparable that easily, as they will have non linear load curves that you would have to account for. ### Takeaway -So the takeaway and also what we wanted to find out as a research question from the start +So the takeaway and also what we wanted to find out as a research question from the start is that the M1 power measurements in Firefox seem to produce a value that looks very interesting and deserves further inspection. @@ -170,12 +170,12 @@ increase the reproducability greatly ## Further plans -We were very suprised that the XNU kernel in macOS supplies a feature that can really +We were very suprised that the XNU kernel in macOS supplies a feature that can really deliver the energy per task. This is what was the most interesting for us. -If you look into the current source tree of Firefox stable it is visiibe that the +If you look into the current source tree of Firefox stable it is visiibe that the [code to instrument the measurement on macOS](https://hg.mozilla.org/integration/autoland/diff/f33bef1f7d560e494bab0599e2022a3ea53902f9/tools/profiler/core/PowerCounters-mac.cpp) is not too complex. However finding relevant documentation posed a problem at the time of writing this article. diff --git a/content/blog/nop-linux.md b/content/blog/nop-linux.md index 9a272cb..7c487d7 100644 --- a/content/blog/nop-linux.md +++ b/content/blog/nop-linux.md @@ -6,13 +6,13 @@ author: "Didi Hoffmann" authorlink: "https://www.linkedin.com/in/dietgerhoffmann/" --- -At Green Coding Berlin (GCB), one goal is to enable reproducible runs on our cluster. An important step towards accurate measurements was the creation of NOP Linux, our custom Linux distro that disables as many background processes as possible to avoid interruptions during measurements. Another crucial step was ensuring the reliable operation of the [PowerSpy2](https://docs.green-coding.io/docs/measuring/metric-providers/psu-energy-ac-powerspy2/), so we could measure the entire power consumption. +At Green Coding Solutions (GCS), one goal is to enable reproducible runs on our cluster. An important step towards accurate measurements was the creation of NOP Linux, our custom Linux distro that disables as many background processes as possible to avoid interruptions during measurements. Another crucial step was ensuring the reliable operation of the [PowerSpy2](https://docs.green-coding.io/docs/measuring/metric-providers/psu-energy-ac-powerspy2/), so we could measure the entire power consumption. 
We wanted to create a cluster that allowed users to select the server on which they'd like to run the benchmark. Initially, we aimed for full automation and looked at the excellent tool from Canonical, [MAAS](https://maas.io/). As we use Ubuntu as our reference system, this seemed to be the logical choice. Although the tool was impressive, it required a daemon running on the machine, which created multiple interruptions during our measurements. This led us to reevaluate our tooling, and we decided to try a simpler approach using PXE. While there is a great description [1], and the general flow worked very well, we invested a significant amount of time and effort in configuring the machines correctly. Getting the entire installation flow working with reboots, different configurations like PowerSpy, and the multitude of different servers we wanted to use presented a considerable overhead. Additionally, we have our machines distributed across various data centers, and we needed to set up a complex networking layer for the DHCP discovery to work. While this was a scalable solution, it required substantial overhead that had to be maintained. Moreover, our tool develops quite rapidly, so we would have to keep updating the installation process. As a small company, this was not feasible in our scenario. Consequently, we decided to sacrifice scalability in favor of simplicity. In the meantime, we had built a complex test setup with various servers and a complicated setup that we could now disassemble. The main lesson learned for the future is to start with the simplest solution that solves the problem and continually reevaluate your assumptions and needs. We are aware that there are a multitude of configuration systems out there that don't require a client running on the machine to be configured and that automate some of the tasks we will now do manually. But we decided to keep it very simple for now and not invest more time into another solution. -At Green Coding Berlin, we are committed to not only creating efficient and reproducible programming solutions but also sharing our findings and tools with the wider community. We firmly believe in the principles of open-source, the power of shared knowledge, and the benefits of collaborative development. Our aim is to create tools and systems that can be utilized by anyone, without the restrictions of proprietary licenses. We don't just want to make our solutions better - we want to make programming better, for everyone. One of the exciting initiatives that align with our philosophy is the Blue Angel for Software. We support this cause and believe that our tools and systems should be made available for such uses. By making our developments publicly available, we hope to contribute to the broader objective of creating software that is efficient, effective, and transparent. +At Green Coding Solutions, we are committed to not only creating efficient and reproducible programming solutions but also sharing our findings and tools with the wider community. We firmly believe in the principles of open-source, the power of shared knowledge, and the benefits of collaborative development. Our aim is to create tools and systems that can be utilized by anyone, without the restrictions of proprietary licenses. We don't just want to make our solutions better - we want to make programming better, for everyone. One of the exciting initiatives that align with our philosophy is the Blue Angel for Software. 
We support this cause and believe that our tools and systems should be made available for such uses. By making our developments publicly available, we hope to contribute to the broader objective of creating software that is efficient, effective, and transparent. **The system we are using now** @@ -23,7 +23,7 @@ As previously mentioned, the current system will not scale to accommodate thousa We have now opted for quite a simple solution. You will need a server that exposes the database externally and all results will be written to this server. We then have a `client.py` script that runs on every server that periodically queries the server for jobs and if so executes the measurement undisturbed. After a job is finished the client does some cleanup tasks and checks if there is an update for the GMT and also for the operating system. It then keeps fetching jobs until there are none left, at which point the client sleeps for 5 minutes and retries. On every wake up we send a message to the server that the client is up and functional. So we can check on the server side that all clients are up and working. -To create your own GCB cluster, you can follow these steps: +To create your own GCS cluster, you can follow these steps: ## 1) Install Ubuntu diff --git a/content/blog/power-measurement-on-macos.md b/content/blog/power-measurement-on-macos.md index dce86aa..287b021 100644 --- a/content/blog/power-measurement-on-macos.md +++ b/content/blog/power-measurement-on-macos.md @@ -9,20 +9,20 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" Last week we have been on the [SDIA event for sustainable software](https://sdialliance.org/landing/softawere-hackathon/) and held a workshop on measuring the energy of digital products. -We presented approchaes where you measure the energy either with tool like [Scaphandre](https://github.com/hubblo-org/scaphandre) or +We presented approaches where you measure the energy either with tools like [Scaphandre](https://github.com/hubblo-org/scaphandre) or the [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool) as well as approaches for restricted environments like the cloud. Here the [SDIA DEF Model](https://knowledge.sdialliance.org/digital-environmental-footprint) and our [SPECPower linear model](https://github.com/green-coding-berlin/spec-power-model) were presented. -What the two former tools have in common though is that they are designed to work +What the two former tools have in common though is that they are designed to work exclusively on Linux. The reason for that is that the RAPL interfaces are nicely integrated into the linux kernel and readily readable from a virtual file endpoint. The downside of it is that it requires root to read this data on Linux. -This is also why the [Firefox 104 energy measurement](http://localhost:1313/blog/firefox-104-energy-measurements/) only works either on +This is also why the [Firefox 104 energy measurement](/blog/firefox-104-energy-measurements/) only works either on Windows or on a Mac M1, because here these interfaces are available in userland. -In the workshop we saw many participants with exclusively MacBooks either running +In the workshop we saw many participants with exclusively MacBooks either running M1 chips or Intel chips and thus we wanted to make a blog post with some info on how to get measurement data on these setups. 
@@ -44,7 +44,7 @@ This is usually your first stop when trying to get some metrics out of your syst The values to look at are **Energy Impact** and **12 hr Power**. Both are unfortunately some custom and relative values that are not even comparable between two machines. -The just tell how much much energy the app is using compared to the device history +They just tell how much energy the app is using compared to the device's history of energy usage and other apps on the system. We are unsure if you could even derive an absolute value from that with just this @@ -69,12 +69,12 @@ Output is: - Temperature - Utilization -The command line tool can even execute a program and run collect all these metrics +The command line tool can even execute a program and collect all these metrics during its execution. As well as RAPL on Linux it has the issue that RAPL metrics are system wide. -So even if you execute a `sleep` program, which would incur an insignificant energy -draw, you might get high energy readings if some CPU intensive program is running +So even if you execute a `sleep` program, which would incur an insignificant energy +draw, you might get high energy readings if some CPU intensive program is running in the background. @@ -85,7 +85,7 @@ in the background. It serves as a very nice introspection tool for your system energy draw though. What we really liked is the functionality to run certain *tests*. -For instance turning off Intel SpeedStep and letting the core always run at max +For instance turning off Intel SpeedStep and letting the core always run at max frequency, or testing for AVX-512 instructions in particular etc. See details in the screenshot on the right. @@ -110,10 +110,10 @@ and have to be converted to Watts to be comparable with Intel Power Gadget. `powermetrics` comes pre-installed on macOS and can handle M1 and also Intel machines. -The output is mostly unified, but shows some differences on the M1 machines mainly due +The output is mostly unified, but shows some differences on the M1 machines mainly due to the summary given at the end and also because the M1 has different types of cores. -`powermetrics` sadly also needs root access although the underlying APIs are technically +`powermetrics` sadly also needs root access although the underlying APIs are technically readable from userland. It can provide a lot of diagnostic output and is the only tool we know if you wanna @@ -127,7 +127,7 @@ Reading these is done through the `rdmsr` instruction of the CPU. Sadly this cannot be sent to the processor from userland, as it is a ring-0 instruction. -However if you ever where to write a kernel extension that does so, this code would +However if you ever were to write a kernel extension that does so, this code would issue the `rdmsr` instruction. You just have to read the energy register `0x611` with it: ```C asm volatile("rdmsr":"=a"(lo),"=d"(hi):"c"(0x611)); ``` In the Linux world the kernel module loaded via `modprobe msr` forwards this functionality -to a virtual file in the `/dev` directory. However we could not find a similar +to a virtual file in the `/dev` directory. However we could not find a similar macOS kext that would do the same. -There is one installed alongside with Intel Power Gadget, but we could not find +There is one installed alongside Intel Power Gadget, but we could not find any documentation on how to leverage that. It might be easily traceable though ... 
If you know of any well documented 3rd partykext or even one that comes diff --git a/content/blog/releasing-code-bench.md b/content/blog/releasing-code-bench.md index cadf988..b21151a 100644 --- a/content/blog/releasing-code-bench.md +++ b/content/blog/releasing-code-bench.md @@ -10,7 +10,7 @@ Standard benchmarks play a pivotal role in the field of software engineering, se However, it is important to acknowledge that not all code is created equal. In reality, a significant portion of software is far from being optimally written. There are numerous reasons for this. Often, developers are constrained by tight deadlines, lack of interest or passion for the project, or simply the pressure to make things work within a limited timeframe. As a result, a lot of the software available today is not finely tuned or optimized. To mitigate this issue, considerable efforts have been made to enhance compilers and the internal mechanics of CPUs, aiming to compensate for less-than-optimal coding practices. -At Green Coding Berlin we have developed a great measurement infrastructure that enables us to measure software reproducible and really see how changes reflect on the impact software has. Now that we can measure code we want to start applying green coding techniques and see how these affect the resource usage. Also a lot of work is currently going on in this space [[0]](https://www.enviroinfo2023.eu/programme/schedule-overview/friday-schedule/)[[1]](https://www.meetup.com/green-software-development-karlsruhe/events/296796570/) but one big problem is that there is no real benchmark of code that can be optimised and that can be measured before and after the changes are applied. I strongly believe that this is a big problem as currently everyone selects their own little piece of code and such there is a) no way to compare results and b) wrong results or conclusions might happen as there is no sound fundament. +At Green Coding Solutions we have developed a great measurement infrastructure that enables us to measure software reproducible and really see how changes reflect on the impact software has. Now that we can measure code we want to start applying green coding techniques and see how these affect the resource usage. Also a lot of work is currently going on in this space [[0]](https://www.enviroinfo2023.eu/programme/schedule-overview/friday-schedule/)[[1]](https://www.meetup.com/green-software-development-karlsruhe/events/296796570/) but one big problem is that there is no real benchmark of code that can be optimised and that can be measured before and after the changes are applied. I strongly believe that this is a big problem as currently everyone selects their own little piece of code and such there is a) no way to compare results and b) wrong results or conclusions might happen as there is no sound fundament. So we set out to create a repository with such code. The main aims are: diff --git a/content/blog/software-life-cycle-assessment.md b/content/blog/software-life-cycle-assessment.md index 6709a3c..38b1553 100644 --- a/content/blog/software-life-cycle-assessment.md +++ b/content/blog/software-life-cycle-assessment.md @@ -6,7 +6,7 @@ author: "Didi Hoffmann" authorlink: "https://www.linkedin.com/in/dietgerhoffmann/" --- -In this article I want to propose a simple way to do a Software Life Cycle Assessment for a piece of code. While explaining important concepts and tradeoffs that seem sensible for a little project. 
Of course things could be done differently and I am happy to discuss them in more detail as this is just a first draft of how things could be done. Please head to [https://github.com/green-coding-berlin/SLCA-Code](https://github.com/green-coding-berlin/SLCA-Code) for all source code and discussions. Please also feel free to contribute to the code through pull requests. As Green Coding Berlin we have been working on this topic for some time and a more academic discussion can be found in our chapter in the [Transparency for Software Climate Impact](https://publication2023.bits-und-baeume.org/#book/38) publication (Part of the Shaping Digital Transformation for a Sustainable Society publication). +In this article I want to propose a simple way to do a Software Life Cycle Assessment for a piece of code. While explaining important concepts and tradeoffs that seem sensible for a little project. Of course things could be done differently and I am happy to discuss them in more detail as this is just a first draft of how things could be done. Please head to [https://github.com/green-coding-berlin/SLCA-Code](https://github.com/green-coding-berlin/SLCA-Code) for all source code and discussions. Please also feel free to contribute to the code through pull requests. As Green Coding Solutions we have been working on this topic for some time and a more academic discussion can be found in our chapter in the [Transparency for Software Climate Impact](https://publication2023.bits-und-baeume.org/#book/38) publication (Part of the Shaping Digital Transformation for a Sustainable Society publication). Software Life Cycle Assessment (SLCA) is a topic that is talked about more and more. While Life Cycle Assessment (LCA) is fairly established in the realm of physical things. There are still various discussions in the software space though. In [ISO 14040](https://en.wikipedia.org/wiki/Life-cycle_assessment) and 14044 LCA is defined as the following: @@ -74,10 +74,10 @@ This is probably the phase the least information is available about when doing r Page 6.6 > -Personally I have to disagree. I am quite aware that my development activity produces a lot of environmental impact. Just considering the tower PC with the two screens attached and the various online services I use that I sometimes don’t power down when I go to lunch, which I use for my Linux related development. +Personally I have to disagree. I am quite aware that my development activity produces a lot of environmental impact. Just considering the tower PC with the two screens attached and the various online services I use that I sometimes don’t power down when I go to lunch, which I use for my Linux related development. Or thinking about intensive DevOps or re-optimizations of AI models, which seem to be the norm in modern software products rather than the exception. Also if you think about all the software products that never reach a relevant audience or are prototypes and never reach production this cost would be totally neglected ... the list goes on and on ... -So I wanted to measure this. +So I wanted to measure this. Starting with the most obvious: My development machine. I was quite astonished to realise there is no out of the box solution that I can just install and get a nice summary of the energy used for various tasks over time with nice drilldown etc. So I decided to keep it simple and just assume when my work computer is on I am doing work things and that everything the computer does is related to the project I am currently working on. 
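As a rough sketch of what this can look like in practice (the file name and the idea of multiplying awake time with a single averaged power value are my own assumptions, not a finished tool), a tiny shell loop that logs a timestamp while the machine is awake is already enough:

```bash
#!/usr/bin/env bash
# Minimal sketch: append a timestamp every 60 s while the machine is on.
# Summing the logged minutes and multiplying with an average power value
# (e.g. taken once from a wall plug meter) gives a first, rough energy total.
while true; do
  date +%s >> "$HOME/machine-on.log"
  sleep 60
done
```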
It is debatable that if Spotify, which is one of the biggest [energy consumers](https://github.com/jpochyla/psst), should really be counted to the development of a web project. But I am listing to music while developing so I would argue that it should count. The same goes for Slack, Mail etc. it is currently unrealistic that I can account the energy usage of each email to the various projects I am working on. So to make things simple: If the computer is on everything is counted towards the main project I am working on. We are currently specking out how this could be solved more finer grain. Unfortunately Linux doesn’t really offer any tools that support energy measurements on a per process level. There are some implementations that use cpu utilisation to split the power, but as discussed in the “[CPU UTILIZATION - A USEFUL METRIC](https://www.green-coding.io/case-studies/cpu-utilization-usefulness/)” article this is sometimes misleading. As also part of this work was done while travelling I developed the code on my M1 Macbook Pro. Because of this I can use the `powermetrics` tool we use when benchmarking software on [MacOS](https://www.green-coding.io/blog/green-coding-on-mac/) which gives me a sort of power measurement through the energy impact value. This is a closed source implementation by Apple but some details are known and exposed in [this article by Mozilla](https://blog.mozilla.org/nnethercote/2015/08/26/what-does-the-os-x-activity-monitors-energy-impact-actually-measure/) for instance. While being far from perfect this value gives you a first impact estimate for each process. @@ -102,7 +102,7 @@ from queue import Queue from pathlib import Path from AppKit import NSScreen -APP_NAME = "gcb_power_logger" +APP_NAME = "gcs_power_logger" app_support_path = Path.home() / 'Library' / 'Application Support' / APP_NAME app_support_path.mkdir(parents=True, exist_ok=True) diff --git a/content/blog/specpower-model-with-xgboost-open-sourced.md b/content/blog/specpower-model-with-xgboost-open-sourced.md index f4271e1..d6150ea 100644 --- a/content/blog/specpower-model-with-xgboost-open-sourced.md +++ b/content/blog/specpower-model-with-xgboost-open-sourced.md @@ -6,13 +6,13 @@ author: "Arne Tarara" authorlink: "https://www.linkedin.com/in/arne-tarara" --- -A detailed blog article is yet to come, but for everyone who follows our +A detailed blog article is yet to come, but for everyone who follows our blog only and not our repositories we wanted to highlight that we open sourced the new XGBoost variant of the [SPECPower estimation model for cloud workloads on github](https://github.com/green-coding-berlin/spec-power-model). -I guess the most interesting parts are the charts, where you can see the -SPECPower data in comparsion to the a model by the [SDIA](https://www.sdialliance.org) formula and also our -old linear model as well as the XGBoost model. +I guess the most interesting parts are the charts, where you can see the +SPECPower data in comparsion to the a model by the [SDIA](https://www.sdialliance.org) formula and also our +old linear model as well as the XGBoost model. ## Summary excerpt @@ -22,11 +22,11 @@ old linear model as well as the XGBoost model. - The linear model is good for parameter exploration, but delivers badly fitted results - The XGBoost model is able to estimate a real world 1-chip machine and an out of sample 2-chip machine from SPECPower very nicely. 
+ However it tends to under-estimate -- We see suprisingly no efficiency gain from applying the SPECPower BIOS settings but rather a smoothing of the curve. +- We see suprisingly no efficiency gain from applying the SPECPower BIOS settings but rather a smoothing of the curve. + The reason to that is currently unknown. ## Detailed Talk & Slides -Here are the slides to our talk, where we present the works on and performance of the model +Here are the slides to our talk, where we present the works on and performance of the model at the PyData Nov'22 Meetup in Berlin. [Download Slides](/slides/PyData-Talk.pdf) \ No newline at end of file diff --git a/content/blog/tdp-and-acp.md b/content/blog/tdp-and-acp.md index 83bb0f4..9141ebd 100644 --- a/content/blog/tdp-and-acp.md +++ b/content/blog/tdp-and-acp.md @@ -6,7 +6,7 @@ author: "Arne Tarara" authorlink: "https://www.linkedin.com/in/arne-tarara" --- -In the green software community we see very often that people use the [TDP of the processor](https://www.intel.com/content/www/us/en/support/articles/000055611/processors.html) +In the green software community we see very often that people use the [TDP of the processor](https://www.intel.com/content/www/us/en/support/articles/000055611/processors.html) as a metric to estimate how much energy the CPU will consume for a specific workload. For instance the [SDIA use-phase estimation model](https://docs.google.com/spreadsheets/d/1uCQVs8mVgfu6fcQLEttDgfqPzhCm1yuf19_9RUDuU6w/edit#gid=1126994188) in it's current form uses this metric. @@ -14,12 +14,12 @@ For instance the [SDIA use-phase estimation model](https://docs.google.com/sprea We have been using this metric in our machine learning model, where it serves as a very good indicator of how much the total energy consumption of the system will be. -However lately I have stumbled over a [white paper from Intel](https://www.intel.com/content/dam/doc/white-paper/resources-xeon-measuring-processor-power-paper.pdf) where they discuss +However lately I have stumbled over a [white paper from Intel](https://www.intel.com/content/dam/doc/white-paper/resources-xeon-measuring-processor-power-paper.pdf) where they discuss the TDP in quite some detail and even compare it to a metric from AMD that I was not aware of: The ACP. Notes summary: -- ACP is a measure by AMD for Opteron processors that gives the average power draw +- ACP is a measure by AMD for Opteron processors that gives the average power draw for the CPU while running a defined set of benchmarks (TPC Benchmark*-C, SPECcpu*2006, SPECjbb*2005, and STREAM). - ACP value for Opteron processors is always lower than TDP - TDP definitions from AMD and Intel are actually different! @@ -46,18 +46,18 @@ This comparison is a bit tricky, as when we look deeper into how the system is a - Adjacent Sector Prefetch - Disabled - ... -So the comparisons are not really on equal terrain. However it has to be clearly noted -that Intel nevertheless has the better performance per watt which is probably due +So the comparisons are not really on equal terrain. However it has to be clearly noted +that Intel nevertheless has the better performance per watt which is probably due to the more single workload focused architecture and higher base frequency. 
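For context, the kind of estimate that such TDP-based approaches boil down to is roughly the following (my own simplification, not the exact SDIA formula):

$$ E_{CPU} \approx u \cdot P_{TDP} \cdot t $$

with \(u\) being an assumed average utilization, \(P_{TDP}\) the data sheet TDP and \(t\) the duration of the workload. Everything discussed above — architecture, configuration and the differing TDP definitions — ends up hidden inside that single \(P_{TDP}\) term.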
-It shows though that the TDP alone might be a confusing indicator if energy is your +It shows though that the TDP alone might be a confusing indicator if energy is your concern, as it heavily depends on how the processor architecture really is, how the workload is and also how the CPU / system is configured. **Important note:** The White Paper is from 2011 and when looking at the current site from Intel regarding TDP their definition reads a bit differently: [Intel TDP landing page](https://www.intel.com/content/www/us/en/support/articles/000055611/processors.html) -It is unclear if the definition really changed or just the wording is a bit different due +It is unclear if the definition really changed or just the wording is a bit different due to a different author / department. ## Comparing TDPs and summary diff --git a/content/blog/updating-dc-measurement.md b/content/blog/updating-dc-measurement.md index 662d71e..5a2877c 100644 --- a/content/blog/updating-dc-measurement.md +++ b/content/blog/updating-dc-measurement.md @@ -9,7 +9,7 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" In my last blog post I have written about how we finally completed the DC measurement reporter for our [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool). -In the last days we have looked at reducing the variance of the ATX powerlanes +In the last days we have looked at reducing the variance of the ATX powerlanes and finding out if swapping out the resistors for more stable current measurement resistors can improve the measurement. @@ -20,13 +20,13 @@ Here are the results! - We are now using [**Isabellenhütte PBV 0,005 Ohm**](https://www.conrad.de/de/p/isabellenhuette-pbv-0-005-messwiderstand-0-005-10-w-l-x-b-x-h-22-x-4-x-17-mm-1-st-447366.html?searchType=SearchRedirect) current measurement resistors - We have swapped out all the wiring and replaced it with new wiring that has matching resistances for every connection. + The lines connecting the two inner terminals of the current resistors are with **0.9 Ohm wires** - + The lines connectiong the two outer terminals are connected with **1.7 Ohm wires**. + + The lines connectiong the two outer terminals are connected with **1.7 Ohm wires**. + Accuracy on the measurement is **+/- 0.1 Ohms** - + ## Adding the remaining lines on the ATX connector In the PoC of our last blog post we have concentrated on the six 12 V power lines of the ATX connector. -The connector however features 4 more slots, where one slot however is not connected with the +The connector however features 4 more slots, where one slot however is not connected with the PSU. {{< rawhtml >}} @@ -45,19 +45,19 @@ drop at least one of these lines. We measured the **PS_ON pin** and got a minimal reading of **~ 20 mV** out of it. This pin is only really needed to be connected on boot and seems to be irrelevant for the measurments. -As well in functionality as also in the power that it consumes. +As well in functionality as also in the power that it consumes. So we could included the **11Vsb** and the **PWR_OK** in our measurements. However when looking at the screenshots of the readouts you see that they do not really contribute to the total power. This was expected for PWR_OK (apart from an error case maybe), -but may not be the case for the 11Vsb. +but may not be the case for the 11Vsb. Depending on what device you have attached and how the mainboard wiring is done there could be a power draw on this line. 
-However in our internal measurements we have not seen any power draw over that line and since -it is kept on a stable voltage it clashes with the behaviour of our input channel. -The input channel was constantly giving out a stable reading of a negative voltage over the +However in our internal measurements we have not seen any power draw over that line and since +it is kept on a stable voltage it clashes with the behaviour of our input channel. +The input channel was constantly giving out a stable reading of a negative voltage over the current resistor although no current was flowing. To quickly falsify our assumption we checked with our AC power readings on a simple wall socket power meter @@ -88,7 +88,7 @@ You can compare the measurements here: picolog hdr adc 24 fujitsu esprimo P956 {{< /rawhtml >}} -As suggested in our previous post the 12 V lines are internally not all wired to the same +As suggested in our previous post the 12 V lines are internally not all wired to the same channel, but are split in two rails. This is also seen in modern sytems, where the [P4 connector](https://en.wikipedia.org/wiki/ATX) can provide a second @@ -99,7 +99,7 @@ Why Fujitsu made the decision to split every 12 V rail in three cables we might By making a short stress test with `stress -c 10` and `stress -m 10` we see that the one rail gets more loaded on a CPU intensive load and the other rail more on a memory intensive load. -They are so distinguishable that by just looking at rail #1 one can tell if the DRAM is accessed. However +They are so distinguishable that by just looking at rail #1 one can tell if the DRAM is accessed. However the direction the other way round does not hold. Rail #2 is also loaded, cause obvioulsy the CPU has to feed the DRAM. Nevertheless this insight might pose an interesting starting point for an algorithmic or ML detection of what kind of load is happening. @@ -108,7 +108,7 @@ Nevertheless this insight might pose an interesting starting point for an algori The total energy consumed with the new setup is very close to the old measurement with the old resistors. -Compare the readings on: +Compare the readings on: - [Old Resitors Measurement](https://metrics.green-coding.io/stats.html?id=f99e563d-2c5c-453d-99fe-5ac9f6f307ac) - [New Resitors Measurement](https://metrics.green-coding.io/stats.html?id=7f4e2725-d84b-4992-aeb7-5f42f797aa73) diff --git a/content/blog/usability-study-of-the-green-metrics-tool.md b/content/blog/usability-study-of-the-green-metrics-tool.md index 8678813..410289b 100644 --- a/content/blog/usability-study-of-the-green-metrics-tool.md +++ b/content/blog/usability-study-of-the-green-metrics-tool.md @@ -7,7 +7,7 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" --- -We recently had a team of aspiring UX researches at Green Coding Berlin that +We recently had a team of aspiring UX researches at Green Coding Solutions that made a research project on how the Green Metrics Tool can be used. This is a guest article with their results @@ -117,10 +117,10 @@ We try to reduce the cognitive load of choices, show the info I need to know fir ## 10 DEFINING THE USER We defined our user persona, as a 32 years guy that is starting his career as a web developer named *Joao*. *Joao* wants to be able to *reduce the damage that he does to the environment by working in tech* but feels *frustrated about not knowing which tools or methods are inside his range of action*. 
He heard about **Green Coding** and would like to get to know about them and *How it works* so he can be able to use the *Green Metrics Tool* and support the cause. -From here, *Joao* will approach the website by the following flow: -1. Starting at the Homepage to get to know Green Coding and its Community and get interested in knowing about their *Green List*. -2. Once at the *Green List* page, *Joao* will check the *Dashboard* of the *Github Links* that have been already provided by other users and measured by **Green Coding**. This will increase his interest in getting to know about other measurements such as *Energy* and *CPU*. -3. He decides to get deeper and check on two different links to enable the *Comparison* page, where he can see the two links and individual measurements being compared. +From here, *Joao* will approach the website by the following flow: +1. Starting at the Homepage to get to know Green Coding and its Community and get interested in knowing about their *Green List*. +2. Once at the *Green List* page, *Joao* will check the *Dashboard* of the *Github Links* that have been already provided by other users and measured by **Green Coding**. This will increase his interest in getting to know about other measurements such as *Energy* and *CPU*. +3. He decides to get deeper and check on two different links to enable the *Comparison* page, where he can see the two links and individual measurements being compared. 4. That's when he decided to check on *How it works*, to be able to participate and provide a link by himself, read some information about the functionality of the Tool, and see a global picture of the issue that is being taken on. {{< rawhtml >}} User Flow diff --git a/content/blog/version-0.1-beta.md b/content/blog/version-0.1-beta.md index a7b65c9..8fb1d08 100644 --- a/content/blog/version-0.1-beta.md +++ b/content/blog/version-0.1-beta.md @@ -20,10 +20,10 @@ Today we have have released the `0.1-beta` version of our [Green Metrics Tool](h Version `0.1-beta` is considered a non-production version. -We are currently working on the falsification of our energy measurements with public +We are currently working on the falsification of our energy measurements with public energy measurement databases. -Also we will add functionality to compare measurements directly in the frontend as +Also we will add functionality to compare measurements directly in the frontend as well as a better install script to quickstart the tool. The biggest part of work currently for us is the documentation which we are working @@ -41,14 +41,14 @@ If you want to query the Open Data of our tool go to the [self-documenting API]( ## Measurements of the tool -On our [Metrics Dashboard](https://metrics.green-coding.io/) you can find already some measurements we did on +On our [Metrics Dashboard](https://metrics.green-coding.io/) you can find already some measurements we did on a copy of the [The Green Web Foundation](https://www.thegreenwebfoundation.org/) website. Here we compare the energy consumption of a Wordpress version against a static version. 
- Static: https://metrics.green-coding.io/stats.html?id=a57a7f97-be18-4a77-b7fe-ae0d76b5e4dd - Wordpress: https://metrics.green-coding.io/stats.html?id=619e720e-f2fc-42fa-9786-c13cb4d9fb72 -An example on how to recreate these dashboards and also dive into the raw data to +An example on how to recreate these dashboards and also dive into the raw data to answer more detailed questions we provide [Example Jupyter Notebooks](https://metrics.green-coding.io/data-analysis.html) diff --git a/content/blog/we-are-hiring.md b/content/blog/we-are-hiring.md index 1766ff1..6ab1e68 100644 --- a/content/blog/we-are-hiring.md +++ b/content/blog/we-are-hiring.md @@ -9,7 +9,7 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" Today we have big news which shine a light on what we have planned for the coming months. -Green Coding Berlin is becoming a legal entity in form of a non-profit [Green Coding Berlin gGmbH]. +Green Coding Solutions is becoming a legal entity in form of a non-profit [Green Coding Solutions gGmbH]. We recently joined the Green Software Foundation (https://greensoftware.foundation/) as a General Member and, apart from our meetups and the planned workshop, will be developing a metrics and certification toolchain for Green Software. diff --git a/content/blog/youtube-extension.md b/content/blog/youtube-extension.md index 5d87546..fa271a8 100644 --- a/content/blog/youtube-extension.md +++ b/content/blog/youtube-extension.md @@ -12,7 +12,7 @@ authorlink: "https://www.linkedin.com/in/alexandre-oliv" How many times do we use Youtube only to listen to songs, or to watch videos without the necessity of going for that fancy 4k/HD resolution? Also: do we really want to automatically watch other (algorithmically-chosen) videos after the current video is finished? In other words, how can we have a carbon-friendly approach while still getting the content we wanted? -During the onboarding process at Green Coding Berlin, I was given the task of creating a simple Chrome extension that would turn off the Autoplay feature of Youtube, and would also scale down the videos to the lowest available quality. +During the onboarding process at Green Coding Solutions, I was given the task of creating a simple Chrome extension that would turn off the Autoplay feature of Youtube, and would also scale down the videos to the lowest available quality.   diff --git a/content/case-studies/DRAFT-turbo-boost-and-energy.md b/content/case-studies/DRAFT-turbo-boost-and-energy.md new file mode 100644 index 0000000..c614eea --- /dev/null +++ b/content/case-studies/DRAFT-turbo-boost-and-energy.md @@ -0,0 +1,186 @@ +--- +title: "Turbo Boost and energy - Processor energy configuration series - Part 2" +draft: false +date: 2023-03-02 +author: "Arne Tarara" +authorlink: "https://www.linkedin.com/in/arne-tarara" +--- + +- Turning ON deep c-states. This might increase startup-latency of some workloads, but since Cores can not only go into one fixed Turbo Boost frequency, but actually if only SOME cores go into a stronger Turbo Boost, then they can reach even higher frequencies! So if you have a workload that is single or low-threaded you can profit from extraordinary high frequencies that you might never see when all cores are always running on higher frequencies. + - Source: https://www.vmware.com/explore/video-library/video-landing.html?sessionid=1686331461690001FeTn&videoId=6340661293112 + +{{< infobox >}} + This arcticle is part of a multi-part series. Be sure to check out / stay tuned for the other parts! 
+ In this series we look at processor configuration options either from the OS side or directly + in MSRs of the CPU and their effect on the power draw of the CPU. +{{< /infobox >}} + +[Turbo Boost](https://en.wikipedia.org/wiki/Intel_Turbo_Boost) is the branded name from Intel for a technology called Dynamic Frequency and Voltage Scaling (DVFS). + +DVFS is available in every modern Intel or AMD CPU. the name in AMD CPUs is [Turbo Core](https://en.wikipedia.org/wiki/AMD_Turbo_Core) + +Turbo Boost can be checked through the linux subsystem, but also by querying the CPU registers directly. + +You can find a script to check if Turbo Boost is on or off on your system in our [Github Tools](https://github.com/green-coding-berlin/tools/blob/main/turbo_boost.sh) repository. + +Turbo Boost as a feature enables the processor to overshoot it's base frequency for a certain amount of time. This enables snappy responsiveness when instantaneous load happens. + +The downside of the feature is that it uses often exponentially more energy and can be detremential to energy cost of the system as a whole. + +The question that arises for someone who is doing research in software energy consumption is: +- How much energy / power does Turbo Boost / SMT need to provide it's functionality? +- What are the drawbacks of Turbo Boost? +- What speed achievements can I achieve? +- Should I turn Turbo Boost off when when energy is my primary concern? +- Does it help "running" to the completion of my calculation and then turn off? (Race to sleep) + +## Energy test + +Our test machine is a MacBook Pro 13" 2015 model with a Intel Core i7-5557U CPU @ 3.1 GHz. + +According to `/proc/cpuinfo` this chip has 2 physical cores (found by looking at max. **core id** number) and 4 threads +(found by looking at max. **processor** number). + +Looking at **flags** we see that **ht** is a feature, which corresponds to Hyper-Threading. + +In order to have a first glimpse at the energy characterisitcs of this feature we are using +`sysbench`, which you can just install through **aptitude** on **Ubuntu 22.04**. + +The command we ran in `sysbench` is: + +```bash +./turbo_boost.sh disable +sleep 180 +perf stat -a -e power/energy-pkg/ sysbench --cpu-max-prime=10000 --threads=48 --test=cpu --events=300000 --time=0 run +./turbo_boost.sh enable +sleep 180 +perf stat -a -e power/energy-pkg/ sysbench --cpu-max-prime=10000 --threads=48 --test=cpu --events=300000 --time=0 run +``` + +The command always runs for **10 s** fixed. What we modified during the runs is the **--threads** argument +as seen in the following table. 
+ +This is the result: + +{{< table class="ui table" >}} +| Turbo Boost On/Off | Blockheating (Time) | Blockheating (Energy) | MacBook Pro 13" (Time) | MacBook Pro 13" (Energy) | Fujitsu TX1330 (Time) | Fujitsu TX1330 (Energy) | +|:-----------:|:----------:|:----------:|:---------:|:---------:|:---------:|:---------:| +| On | 10,13 s | 1635,11 J | 93,477 s | 1482,08 J | 68,1 s | 958,66 J | +| On | 10,13 s | 1636,11 J | 93,87 s | 1476,85 J | 68,34 s | 964,73 J | +| On | 10,14 s | 1640,56 J | 93,48 s | 1484,34 J | 68,36 s | 967,26 J | +| **AVG** | **10,13 s** | **1637,26 J** | **93,609 s** | **1481,09 J** | **68,26 s** | **963,55 J** | +| **STDDEV** | **0,0058** | **2,9013** | **0,226** | **3,8419** | **0,1447** | **4,4198** | +| **STDDEV %** | **0,057** | **0,1772** | **0,2415** | **0,2594** | **0,2119** | **0,4587** | +| | | | | | | | +| | | | | | | | +| On | 8,76 s | 1744,58 J | 86,84 s | 1781,17 J | 49,122 s | 1315,72 J | +| On | 8,76 s | 1742,85 J | 87,26 s | 1782,25 J | 49,12 s | 1315,33 J | +| On | 8,77 s | 1747,99 J | 85,95 s | 1782,09 J | 49,09 s | 1313,75 J | +| **AVG** | **8,76 s** | **1745,14 J** | **86,68 s** | **1781,84 J** | **49,11 s** | **1314,93 J** | +| **STDDEV** | **0,0058** | **2,6154** | **0,6689** | **0,5829** | **0,0179** | **1,0432** | +| **STDDEV %** | **0,0659** | **0,1499** | **0,7717** | **0,0327** | **0,0365** | **0,0793** | +| | | | | | | | +| | | | | | | | +| **Increase / Decrease** | **86,48 %** | **106,59 %** | **92,6 %** | **120,31 %** | **71,94 %** | **136,47 %** | + + + + +{{}} + + +An important question is however also: How does this compare in a real-world use-case? Will results be the same? + +We picked the Unit-Tests of the Django project (https://github.com/green-coding-berlin/example-applications/tree/main/django_tests) +and run the tests with Turbo-boost off and on. + +Important: If you have Turbo-Boost off and you have high loads on Hard-Disks the cut-off point when it makes sense to +turn on / off Turbo Boost will occur earlier. + +In a [Django Unit-Test run with Turbo Boost On](https://metrics.green-coding.io/stats.html?id=48bec2ad-7bb6-4278-bed9-4b4f9afa606e) we see that the CPU-Package power is at **21.3 W** and equates over +the runtime of **171 s** to an energy budget of **3645.30 J**. + +For the [Django Unit-Test run with Turbo Boost Off](https://metrics.green-coding.io/stats.html?id=b93ad091-4c70-447b-a828-598672c96d6e) we see that the CPU-Package power is at **12.87 W** and equates over +the runtime of **218 s** to an energy budget of **2805.90 J**. + +So even in this real-world scenarios we see here that Turbo-Boost is a detremential feature when looking at the CPU +Energy cost. +However, this is not the whole story. + +When looking at energy costs you always have to expand the picture as much as you can: +- How much auxilliary devices are running that also consume power? + + HDDs + + SDDs + + RAM + + etc. + +We see that the the CPU Package power dropped from 21.3 W -> 12.87 W (39.5 % reduction), but looking at the whole machine (*psu_power_ac_powerspy2_system*) +we see that the drop is not that significant 42.17 W -> 32.62 W (22.64 % reduction). +This makes perfect sense, as the CPU is only a part of the whole machine and the other components still run and are +not affected by Turbo Boost. + +We see in this particular case, that the gain is still beneficial. The total energy (*psu_energy_ac_powerspy2_system*) for +the Turbo Boost Off case is still lower (7113.10 J vs. 
7215.67 J) + +If you are however in a datacenter, where the system is maybe externally cooled AND the cooling could be turned off if the +machine can also be turned off, then you would have a case where it would make sense to have Turbo Boost turned on +for this particular hardware setup. + +So you always have to make the scope as wide as you reasonably can and include all parts you use energy for. + +A + + +As seen in the charts and the table Hyper-Threading on our Intel CPU on the test bench +is always able to deliver more operations per 10 seconds. + +The energy with Hyper-Threading turned on exceeds the total amount of the non-Hyper-Threading configuration +of the chip when using 3 cores or more. +This was not necessarily expected ... it could also have been that the chip somehow throttles the performance +but uses a constant energy budget.. + +The other interesting metric is the mJ / Ops metric. Here we can see that Hyper-Threading +actually is more energy efficient per operation than running the system +only with physical cores. + + +## Discussion +The results are quite suprising as Hyper-Threading used to have a bit of a bad rep. + +For instance [this article from Percona](https://www.percona.com/blog/2015/01/15/hyper-threading-double-cpu-throughput/) comes to the conclusion that Hyper-Threading has rather +throttling features and typically is more suitable for low utilization workloads. + +Also [Hyper-Threading has potential security issues](https://www.theregister.com/2019/10/29/intel_disable_hyper_threading_linux_kernel_maintainer/) although the current state +and if it relevant in real world setups is not quite clear to us. + +Another factor to keep into consideration is that Hyper-Threading by theory reduced the +latency of your system when a task is picked up.\ +This makes perfect sense, as you introduce another scheduling layer.\ +However since a normal Linux installation is anyway not real-time workload optimized +this factor might not weigh very high. + +All in all we are very suprised about how energy friendly the feature is and especially +for the typical server workloads that are rather multi-threaded and mostly idling. +Since Hyper-Threading seems to have no effect on idle CPUs this seems like a perfect fit. + +Since Hyper-Threading is by default turned on, and also every server in the SPECPower database +has it turned on we see no reason to run benchmarks that should reflect CPU capabilities with +Hyper-Threading turned off. + + + + +So what is the conclusion? Machines should be setup like this in on-premise environments. +Then, if it is time to buy a new machine, because the current amount of machines cannot handle the amount of tasks anymore +it should first be checked what the uptake of buying this machine is + +If you can somehow quantify the time that you are loosing in energy, than make a trade-off calculation. + +If you would be buing new machines, then trade-off. + +If you typically have long idle or even shutdown periods in between and there is no good reason why +the calculation should be faster (which is often the case for many tasks in a system) -> off + + + +Did we miss something? 
Please shoot us an email to [info@green-coding.io](mailto:info@green-coding.io) \ No newline at end of file diff --git a/content/case-studies/cpu-power-capping.md b/content/case-studies/cpu-power-capping.md index 817f9c1..e927fed 100644 --- a/content/case-studies/cpu-power-capping.md +++ b/content/case-studies/cpu-power-capping.md @@ -16,7 +16,7 @@ socialmedia_preview: "img/social-media-previews/cpu-power-capping.webp" Power Capping is feature that will artifically limit the electrically supplied power of a hardware component with the intention of making it less warm and / or using less electrical power draw. -This feature has been featured in some recent publications that focus on AI trainings on GPUs with the intention of getting the +This feature has been featured in some recent publications that focus on AI trainings on GPUs with the intention of getting the energy consumption for their training down. [[1]](#sources) [[2]](#sources) [[3]](#sources) Since most compute workloads for transactional and operational work is still done on CPUs they make up a significant @@ -110,7 +110,7 @@ sudo apt install sysbench # we need this for a testing workload Once everything is installed you can check if the data can be read by issueing: ```console $ ls /sys/class/powercap/intel-rapl/ -enabled intel-rapl:0 power subsystem uevent +enabled intel-rapl:0 power subsystem uevent # There should be a directory named intel-rapl:0 which represents your one installed CPU chip $ cat /sys/class/powercap/intel-rapl/intel-rapl:0/name @@ -145,10 +145,10 @@ Now we can modify the power capping of the system live by just writing to these If, for example, we want to limit the power draw to **10 W** we issue: ```console -$ echo "10000000" | sudo tee /sys/class/powercap/intel-rapl/intel-rapl:0/constraint_0_power_limit_uw +$ echo "10000000" | sudo tee /sys/class/powercap/intel-rapl/intel-rapl:0/constraint_0_power_limit_uw # Set 10 W long_term -$ echo "10000000" | sudo tee /sys/class/powercap/intel-rapl/intel-rapl:0/constraint_1_power_limit_uw +$ echo "10000000" | sudo tee /sys/class/powercap/intel-rapl/intel-rapl:0/constraint_1_power_limit_uw # Set 10 W short_term ``` @@ -212,7 +212,7 @@ Threads fairness: events (avg/stddev): 2214.6250/1.32 execution time (avg/stddev): 10.0011/0.00 -... +... ``` @@ -220,7 +220,7 @@ We are now trying the benchmark with factory default (long_term is capped to **3 {{< table class="ui table" >}} -| Power Cap | Time (s) | Events | Energy CPU Package (J) | +| Power Cap | Time (s) | Events | Energy CPU Package (J) | |:---------------:|:------------------:|:------:|:----------------------:| | Factory default | 8.478750 +- 0.01% | 15000 | 190.67 +- 0.19% | | 100% | 8.478520 +- 0.02% | 15000 | 190.75 +- 0.02% | @@ -241,7 +241,7 @@ Here we see that around **80%** there seems to be a sweet spot where the power s An important question is now: How does this compare in a real-world use-case? And how are the results to be interpreted when we do look at the whole machine and not only the CPU? What happens if we factor in embodied carbon? Will results be the same? For this we are using our [Green Metrics Tool](/projects/green-metrics-tool/) and are measuring the unit test of the [Django](https://github.com/green-coding-berlin/django -) project. This is by design a **event-fixed** workload also. +) project. This is by design a **event-fixed** workload also. 
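A sketch of how cycling through the different cap levels for the sysbench runs above can be scripted (the sysfs path is the one shown earlier; the percentage steps, thread count and `sudo tee` for root access are assumptions on our side):

```bash
#!/usr/bin/env bash
# Sweep the long_term RAPL power cap and run the same event-fixed workload each time.
RAPL=/sys/class/powercap/intel-rapl/intel-rapl:0
DEFAULT=$(cat "$RAPL/constraint_0_power_limit_uw")   # factory default in µW

for pct in 100 90 80 70 60 50; do
  echo $(( DEFAULT * pct / 100 )) | sudo tee "$RAPL/constraint_0_power_limit_uw" > /dev/null
  perf stat -a -e power/energy-pkg/ \
    sysbench --cpu-max-prime=10000 --threads=4 --test=cpu --events=15000 --time=0 run
done

echo "$DEFAULT" | sudo tee "$RAPL/constraint_0_power_limit_uw" > /dev/null   # restore default
```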
We have put all measurements online on the [Green Metrics Tool Dashboard](https://metrics.green-coding.io/timeline.html?uri=https://github.com/green-coding-berlin/django&filename=usage_scenario.yml&branch=main&machine_id=6&start_date=2023-12-01&end_date=2023-12-02) {{< rawhtml >}} @@ -273,7 +273,7 @@ We have put all measurements online on the [Green Metrics Tool Dashboard](https: {{< /rawhtml >}} -The different coloring in the charts show the different power cappings. +The different coloring in the charts show the different power cappings. - *Green* is **100%** - *Red* is **80%** - *Violet* is **60%** diff --git a/content/case-studies/cpu-utilization-usefulness.md b/content/case-studies/cpu-utilization-usefulness.md index 0f6f4e5..ee5488e 100644 --- a/content/case-studies/cpu-utilization-usefulness.md +++ b/content/case-studies/cpu-utilization-usefulness.md @@ -190,7 +190,7 @@ So taking a normal **Ubuntu 22.04** system on a **Fujitsu Esprimo 956** **4-Core
Dynamic CPU utilization low load
{{< /rawhtml >}} -What we see here is that the CPU utilization for our sample program, which shows up as a `bash`, when there is no other load on the +What we see here is that the CPU utilization for our sample program, which shows up as a `bash`, when there is no other load on the system clocks in at **9.6%** on a single core. This equals to **2.4%** on the total **4-core** system. Now we introduce some load with running a `stress` process on two cores keeping these cores fully busy. @@ -242,7 +242,7 @@ to any process load that is found on the system. This is a bit confusing at first, as the system also has an idle load ... so, as discussed earlier, if the scheduler puts cores on the idle thread they still consume energy, which will be reported by RAPL, but assinging them to a very low load process that might not even be on the core sounds counter-intuitive. -This is also [not documented on that form](https://hubblo-org.github.io/scaphandre-documentation/explanations/how-scaph-computes-per-process-power-consumption.html), but we assume it is a voluntary design decision. +This is also [not documented on that form](https://hubblo-org.github.io/scaphandre-documentation/explanations/how-scaph-computes-per-process-power-consumption.html), but we assume it is a voluntary design decision. It also feels like it, because otherwise [Scaphandre](https://github.com/hubblo-org/scaphandre) would have to calculate the idle load on the sytem first, but it starts right away with giving out numbers. @@ -264,8 +264,8 @@ Now we put some load on the system and see how [Scaphandre](https://github.com/h {{< /rawhtml >}} -We see again the about same utilzation values for our `bash` process, and we see that [Scaphandre](https://github.com/hubblo-org/scaphandre) -reports **~ 0.34 W** when we just run the `read_cpu.sh` on the system and **~ 0.43 W** when +We see again the about same utilzation values for our `bash` process, and we see that [Scaphandre](https://github.com/hubblo-org/scaphandre) +reports **~ 0.34 W** when we just run the `read_cpu.sh` on the system and **~ 0.43 W** when the system is stressed with two other `stress` processes. Also we see that the numbers that [Scaphandre](https://github.com/hubblo-org/scaphandre) is reporting per process do not add up to the total Package Power. @@ -285,7 +285,7 @@ If we now multiply **24.71 W \* 0.01075** we get **0.27 W** For the un-stressed machine where just the `read_cpu.sh` runs it follows: **1.99 W \* (0.093 / 4)** = **0.046 W** -These values are strongly different from what Scaphandre reports. Especially when there is a higher idle time, as it +These values are strongly different from what Scaphandre reports. Especially when there is a higher idle time, as it is in the unloaded case, these values strongly differ. ## Instructions through perf @@ -341,7 +341,7 @@ Since the energy loads on the system are quite identical, we will use the same e {{< /rawhtml >}} ``` -Statistics for only read_cpu.sh running: +Statistics for only read_cpu.sh running: - 3748521765 Instructions read_cpu.sh (+/- 10%) - 3976515696 Total System read_cpu.sh (+/- 10%) - Ratio: 0.9426648985116944 @@ -368,10 +368,10 @@ performance. 
| Methodlogy | load | Power [W] | |:-----------:|:----------:|:----------:| | Scaphandre | loaded | 0.43 | -| CPU% | loaded | 0.27 | -| perf loaded | loaded | 0.16 | -| Scaphandre | unloaded | 0.34 | -| CPU% | unloaded | 0.046 | +| CPU% | loaded | 0.27 | +| perf loaded | loaded | 0.16 | +| Scaphandre | unloaded | 0.34 | +| CPU% | unloaded | 0.046 | | perf | unloaded | 0.19 | {{}} @@ -396,10 +396,10 @@ Because if we would have system that has a fixed CPU frequency the load-performa {{< /rawhtml >}} -In this example we have run `sysbench --cpu-max-prime=25000 --threads=1 --time=10 --test=cpu --events=0 --rate=0` and +In this example we have run `sysbench --cpu-max-prime=25000 --threads=1 --time=10 --test=cpu --events=0 --rate=0` and put a CPU % limiting on the process and increased that in 10% increments. The blue curve has been done with the *schedutil* CPU frequency govenor which dynamically scales the CPU frequency. -And the red curve has been done with the performance scaling govenor which scales the CPU frequency to a maximum as +And the red curve has been done with the performance scaling govenor which scales the CPU frequency to a maximum as soon as even a minimum amount of load happens on a core. Let's look at how different cloud vendors have this setting for CPU frequency and *Turbo Boost* set, so we can later diff --git a/content/case-studies/devops-energy-savings-reducing-docker-container-size.md b/content/case-studies/devops-energy-savings-reducing-docker-container-size.md index d6579be..9848737 100644 --- a/content/case-studies/devops-energy-savings-reducing-docker-container-size.md +++ b/content/case-studies/devops-energy-savings-reducing-docker-container-size.md @@ -10,25 +10,25 @@ authorlink: "https://www.linkedin.com/in/djesic-613732152/" Would a smaller image size reduce the total energy required to run it? And what are the trade-offs for that? -We created images on [our Docker Hub](https://hub.docker.com/u/greencoding) that contain an environment for running [Puppeteer](https://github.com/puppeteer/puppeteer) that we use for testing. +We created images on [our Docker Hub](https://hub.docker.com/u/greencoding) that contain an environment for running [Puppeteer](https://github.com/puppeteer/puppeteer) that we use for testing. They are based of off an Ubuntu base image and include a browser and Puppeteer installed via npm. - https://hub.docker.com/u/greencoding - https://github.com/green-coding-berlin/example-applications/tree/main/puppeteer-firefox-chrome -We started with the assumption that reducing the image size would require less energy +We started with the assumption that reducing the image size would require less energy from users that download the image from Docker Hub, but also increase the build time. And that this tradeoff would have a compounding effect for successive downloads. -One of the possible approaches to reducing the size of our images was to change the base image -away from Ubuntu and towards something that still met the requirements for our dependencies +One of the possible approaches to reducing the size of our images was to change the base image +away from Ubuntu and towards something that still met the requirements for our dependencies but at a smaller size, causing us to consider [Alpine](https://www.alpinelinux.org/). -Because Alpine uses a different package manager `apk`, and has difference to -what Ubuntu's `apt` has to offer, we needed to make sure that our image is still functional. 
-This involved a little bit of trial and error to see what dependencies are required to get +Because Alpine uses a different package manager `apk`, and has difference to +what Ubuntu's `apt` has to offer, we needed to make sure that our image is still functional. +This involved a little bit of trial and error to see what dependencies are required to get a browser with a GUI running from a container based off of an Alpine image. However, we are talking 10-15 Mins here! -Another optimization for building docker images is making sure that it is built using [buildkit](https://docs.docker.com/build/buildkit/). +Another optimization for building docker images is making sure that it is built using [buildkit](https://docs.docker.com/build/buildkit/). On our machine it was enabled by default, but in case it is not the default, you can specify it as: ```code @@ -55,7 +55,7 @@ And these are the build results: | New Chrome | 478.01 J | 1,427.71 J | 90.32 J | 101.18 s | {{}} -The results were similar for our two images where our efforts were focused; +The results were similar for our two images where our efforts were focused; - The image with Puppeteer running in Chrome was ~600MB smaller (1650MB -> 1070MB) + and it required ~45 less seconds of build time (147s -> 101s). @@ -68,9 +68,9 @@ Docker images get compressed before being uploaded, and we can replicate the com - Firefox image: 347MB -> 282MB - Chrome image: 552MB -> 350MB -This does not however mean that Alpine is a silver bullet for slimmer images. -A prominent pitfall to avoid is using Alpine for Python based projects, -as there are no wheels (way in which Python packages are bundled) for Alpine, +This does not however mean that Alpine is a silver bullet for slimmer images. +A prominent pitfall to avoid is using Alpine for Python based projects, +as there are no wheels (way in which Python packages are bundled) for Alpine, resulting in packages needing to be built from source and drastically increasing build times. Considering that the build time and compressed image size are decreased, this optimization resulted in a net energy reduction. 🎉 diff --git a/content/case-studies/rapl-and-sgx.md b/content/case-studies/rapl-and-sgx.md index 1840635..a4b509c 100644 --- a/content/case-studies/rapl-and-sgx.md +++ b/content/case-studies/rapl-and-sgx.md @@ -8,7 +8,7 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" --- -RAPL stands for Running Average Power Limit. It is a power estimation feature in +RAPL stands for Running Average Power Limit. It is a power estimation feature in modern x86 CPUs from Intel and AMD. In the green software community it is extensively used in order to get accurate @@ -32,12 +32,12 @@ coined [Platypus](https://platypusattack.com/) Moritz Lipp showed that it is possible to read the current executed processor instructions and also the memory layouts especially for data stored in the believed to be secure memory enclave from Intel called [SGX](https://en.wikipedia.org/wiki/Software_Guard_Extensions) -SGX (Software Guard Extension) is a feature which allows the processor to create an +SGX (Software Guard Extension) is a feature which allows the processor to create an enclave in the memory that cannot be accessed even by code running in lower access rings than the current code. 
-Intel has reacted directly with a [microcode update](https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/running-average-power-limit-energy-reporting.html) that results in distorting +Intel has reacted directly with a [microcode update](https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/running-average-power-limit-energy-reporting.html) that results in distorting the RAPL signal when SGX is enabled in the system. Alternatively, the user may also set a register in the processor to activate the so-called *energy filtering* even when SGX is disabled. @@ -52,7 +52,7 @@ Intel says that the actual RAPL data might be skewed by up to 50% of the origina {{< /rawhtml >}} -What we have not found so far is any script or data that reproduces this behaviour +What we have not found so far is any script or data that reproduces this behaviour and shows the distortion in action. {{< rawhtml >}} @@ -79,7 +79,7 @@ and shows the distortion in action. Finding an SGX enabled or energy filtering enabled machine {{< /greenblock >}} -We first tried all the machines that we had lying around that according to Intel hat SGX +We first tried all the machines that we had lying around that according to Intel have SGX on the chip or can activate it through Intel ME: - Our Surface Book 1 and 2 have the capability according to Intel, but Microsoft's custom BIOS cannot enable it: https://www.reddit.com/r/Surface/comments/7z1kmz/intel_sgx_extensions_arent_enabled_in_uefi_cant/ @@ -93,7 +93,7 @@ on the chip or activateable through Intel ME: We then resorted to going to the cloud, as there are way more CPUs available to us there than we have at home. -Sadly (at least for our case :) ) [SGX is usually always disabled in cloud environments](https://tozny.com/blog/secure-computation-cloud-sgx/). AWS even rolls it's own +Sadly (at least for our case :) ) [SGX is usually disabled in cloud environments](https://tozny.com/blog/secure-computation-cloud-sgx/). AWS even rolls its own enclave called [Nitro Enclaves](https://aws.amazon.com/ec2/nitro/nitro-enclaves/). However, if you rent a bare metal EC2 machine (either the .metal or the largest option) @@ -140,7 +140,7 @@ on idle for **5 Minutes** with our [low overhead MSR RAPL checking reporter](htt {{< /rawhtml >}} -Raw data: +Raw data: - [ec2_m5.metal_idle_p0_energy_filtering_off.csv](/files/ec2_m5.metal_idle_p0_energy_filtering_off.csv) - [ec2_m5.metal_idle_p1_energy_filtering_off.csv](/files/ec2_m5.metal_idle_p1_energy_filtering_off.csv) - [ec2_m5.metal_idle_p0_energy_filtering_on.csv](/files/ec2_m5.metal_idle_p0_energy_filtering_on.csv) @@ -153,14 +153,14 @@ Summary We tested the system only on idle and as we can clearly see in the graphs the signal is (apart from three small outlier spikes) very close to the mean. -The distorted signal with *energy filtering* turned on is not only extremely noise and has a very high variance, it also has suprisingly a higher +The distorted signal with *energy filtering* turned on is not only extremely noisy and has a very high variance, it also surprisingly has a higher mean than the non-filtered signal. We would have expected that at least the mean over a longer period of time would stay the same ... but maybe 5 Minutes are not enough to get a solid average.
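If you want to record a comparable idle trace yourself without our MSR reporter, a minimal sketch using the standard powercap interface could look like this (a single package exposed as `intel-rapl:0` and root privileges are assumptions; counter wrap-around is ignored):

```bash
#!/usr/bin/env bash
# Sample the package energy counter once per second for 5 minutes
# and write the per-interval energy in µJ to a CSV file.
F=/sys/class/powercap/intel-rapl/intel-rapl:0/energy_uj
prev=$(cat "$F")
for i in $(seq 1 300); do
  sleep 1
  cur=$(cat "$F")
  echo "$i,$(( cur - prev ))"
  prev=$cur
done > rapl_idle.csv
```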
-The results are so strong in effect that even this setup, which allows only for qualitative conclusion is already sufficient to say that -an active *energy filtering* results in an unusable signal. Maybe even the average over a long time might not be of any use ... but this +The results are so strong in effect that even this setup, which allows only for a qualitative conclusion, is already sufficient to say that +an active *energy filtering* results in an unusable signal. Maybe even the average over a long time might not be of any use ... but this needs further investigation. The takeaway for us is to incorporate guard clauses in all our tools to check for this feature and abort any measurements with an error if active. \ No newline at end of file diff --git a/content/case-studies/wordpress-vs-hugo-cloudflare.md b/content/case-studies/wordpress-vs-hugo-cloudflare.md index fe81e54..c0f867a 100644 --- a/content/case-studies/wordpress-vs-hugo-cloudflare.md +++ b/content/case-studies/wordpress-vs-hugo-cloudflare.md @@ -11,9 +11,9 @@ authorlink: "https://www.linkedin.com/in/arne-tarara" {{< /rawhtml >}} -In this case study we will look the carbon benefit of a static site. +In this case study we will look at the carbon benefit of a static site. -Site generators like [HUGO](https://www.gohugo.io) are currently all the hype. The idea is to write your website in simple markdown but still benefit from nice styling and templating. +Site generators like [HUGO](https://www.gohugo.io) are currently all the hype. The idea is to write your website in simple markdown but still benefit from nice styling and templating. The result will be a one-time generated page which saves server compute resources, is faster to deliver and, hopefully, also saves carbon emissions. @@ -33,7 +33,7 @@ Also we will talk about hosting, as we will see that just moving your site to Wo + We will spin up the basic wordpress with the same template and just make a request to the root page -- CURL vs. Firefox +- CURL vs. Firefox - Exclude functionality for containers in measurement!

- +
Measurements Disclaimer
All energy measurements and / or benchmarks on a normal operating system are by nature error prone and not directly comparable. Please never compare our values with values on your system. Measurements of software can only ever be compared on the exact same system. Also, measurements should never be seen as ground truth, but only as an indicator of the order of magnitude.

-
+
@@ -202,7 +202,7 @@ Also we will talk about hosting, as we will see that just moving your site to Wo

The chart on the right side shows the relation of the HUGO build process (~1.4 J), the HUGO energy per web request (~1 J) and the energy for a web request to Wordpress (~10 J).

Since we have repeated the measurements 3 times we attached the error bars, which represent the 95% confidence interval.

The measurements are already an order of magnitude apart, which is roughly what we assumed from the start. A static site is enormously more efficient, even if you also include the build process. Even for this simple setup it is around 10x.
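To put these numbers in relation, here is a small back-of-the-envelope sketch in Python. It only uses the rounded figures from this case study (~1.4 J build, ~1 J per HUGO request, ~10 J per Wordpress request); the request counts are arbitrary illustration values, not measurements.

```python
# Rough comparison of the total energy to serve N requests, using the rounded
# per-request figures from this case study (all values in Joules).
HUGO_BUILD_J = 1.4          # one-time build of the static site
HUGO_REQUEST_J = 1.0        # energy per request against the static site
WORDPRESS_REQUEST_J = 10.0  # energy per request against Wordpress

def total_energy_hugo(requests: int) -> float:
    return HUGO_BUILD_J + requests * HUGO_REQUEST_J

def total_energy_wordpress(requests: int) -> float:
    return requests * WORDPRESS_REQUEST_J

for n in (1, 10, 1000):  # arbitrary illustration values
    hugo = total_energy_hugo(n)
    wordpress = total_energy_wordpress(n)
    print(f"{n:>5} requests: HUGO {hugo:.1f} J vs Wordpress {wordpress:.1f} J "
          f"(~{wordpress / hugo:.1f}x)")
```

The more requests the site serves, the less the one-time build cost matters and the closer the ratio gets to the ~10x seen in the measurement.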

-

Note: If you want to drill down on the details of the measurement like: How long was the pre-heat time of the CPU, how long was the pre-idle time, which CPU was used, what was the measurement resolution etc. please check the details on the links in the box above.

+

Note: If you want to drill down on the details of the measurement (how long the pre-heat time of the CPU was, how long the pre-idle time was, which CPU was used, what the measurement resolution was, etc.) please check the links in the box above.

-

+

When optimizing for low carbon and low energy, hosting is a very important topic.

If you were just to replace the Wordpress site with a static site but the server continues to run idle, your total energy savings might be minimal.

If you look at the simplified load-to-energy curve from Microsoft, you can see that just by having the machine idle you are already using a significant portion of energy.
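To illustrate why idle power dominates, here is a hedged sketch assuming a simplified linear load-to-power curve; the 100 W idle / 200 W peak figures and the load percentages are made-up example values, not taken from the Microsoft curve.

```python
# Simplified linear load-to-power model: power = idle + load * (peak - idle).
# The idle/peak wattages and load levels below are illustrative assumptions only.
IDLE_W = 100.0   # power draw at 0% load (assumed)
PEAK_W = 200.0   # power draw at 100% load (assumed)

def power_at_load(load: float) -> float:
    """Return the power draw in Watts for a load between 0.0 and 1.0."""
    return IDLE_W + load * (PEAK_W - IDLE_W)

wordpress_kwh = power_at_load(0.10) * 1 / 1000   # server at ~10% load for one hour
static_kwh = power_at_load(0.01) * 1 / 1000      # same server at ~1% load for one hour

print(f"Wordpress:   {wordpress_kwh:.3f} kWh/h")
print(f"Static site: {static_kwh:.3f} kWh/h")
print(f"Savings: {(1 - static_kwh / wordpress_kwh) * 100:.0f}% "
      f"(most of the energy is the idle baseline)")
```

Under these assumptions the per-request savings barely show up in the total, because the idle baseline is untouched as long as the server keeps running.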

diff --git a/content/co2-formulas.de.md b/content/co2-formulas.de.md new file mode 100644 index 0000000..e67b103 --- /dev/null +++ b/content/co2-formulas.de.md @@ -0,0 +1,191 @@ +--- +title: "CO2 Formulas" +draft: false +summary: "Formeln zur Umrechnung von GB in CO2e oder von kWH in CO2e usw." +date: 2022-07-21 08:00:00 +author: "Arne Tarara" +authorlink: "https://de.linkedin.com/in/arne-tarara" + +--- + +Bei der Verwendung von Software ist die typische Einheit, die wir direkt erleben, oft die Zeit. Software ist entweder schnell oder langsam, +aber wir denken in der Regel nicht über den CO2-Fußabdruck von Software nach. + +Software verbraucht Energie durch die Nutzung der zugrundeliegenden Hardware (in der Regel in Watt gemessen). Diese Watt müssen irgendwoher kommen, und wir werden sehen, dass jede Energiemenge in der Regel mit einem CO2 verbunden ist. + +Auch Software verbraucht Energie, wenn sie Netzwerkanfragen stellt, und ist indirekt für den "embodied carbon" verantwortlich, d. h. das CO2, das bei der Herstellung der von ihr verwendeten Hardware freigesetzt wird. + +Auf dieser Seite wollen wir beleuchten, wie man von typischen Werten wie **Zeit** oder **Megabyte** zu **kWh** und schließlich zu **CO2** kommt. + +{{< rawhtml >}} +
+
+ +
+
+
Liste von CO2 Formeln
+
+
+
+ +

+ +

Wenn Sie die Kosten für die Übertragung einer Datenmenge über das Internet beziffern wollen, müssen Sie jeden Schritt des Pakets messen und die Kosten für Router, Kabel, Sendemasten usw. addieren.

+

Da diese Messdaten nicht zur Verfügung stehen, wird eine Heuristik verwendet, die die Kosten der gesamten Netzausrüstung, durch die die Daten fließen müssen, auf der Grundlage der tatsächlich übertragenen Datenmenge in GB schätzt.

+

Wenn Sie den Wert bereits in GB haben, können Sie ihn einfach mit einem konstanten Faktor in kWh umrechnen.

+

Diese scheinbar einfache Formel enthält jedoch eine Menge Annahmen und verwendet Durchschnittswerte.

+

Typischerweise folgen diese Ansätze entweder einem Top-Down-Ansatz, bei dem die Stromrechnung eines Telekommunikationsanbieters und dann die Netzübertragungsberichte betrachtet werden, um die beiden Zahlen zu teilen.

+

Andere Ansätze bestehen darin, den Weg einiger Beispiel-Pakete wirklich zu verfolgen und jedes einzelne Netzgerät auf dem Weg zu untersuchen und dann den Stromverbrauch nur für die Übertragung zu ermitteln.

+

Es gibt auch andere Ansätze, aber alle haben Vor- und Nachteile. Die Zahl 0.06 kWh / GB ist bereits ein Mix, der das Beste aus all diesen Ansätzen herausholen soll.

+

In der Studie, die in der unten angegebenen Quelle [1] verlinkt ist, wird auch darauf hingewiesen, dass die Stromintensität durch Fortschritte bei der Effizienz alle zwei Jahre um etwa die Hälfte sinkt, und es wird angenommen, dass eine Extrapolation für die kommenden Jahre eine gültige Annahme sein könnte. Dies bringt den Wert für den Umrechnungsfaktor im Jahr 2023 auf 0.00375 kWh / GB herunter.

+

Dieses Modell ist jedoch nicht ohne Kritik, da die meisten Netzwerkkomponenten ca. 80 % ihrer maximalen Leistungsaufnahme bereits im Leerlauf verbrauchen. Der Rest ist ein proportionaler Faktor, der von der Nutzung der möglichen Bandbreite abhängt. Es gibt Ansätze, um die tatsächlichen Stromkosten für Anwendungen, die das Netzwerk nutzen, realistischer abzubilden, doch haben diese sog. Time-Sharing- oder Data-Sharing-Modelle alle unterschiedliche Nachteile [2].

+

Wir haben uns entschieden, in unseren Tools den linearen Ansatz der Zuordnung von Netzübertragungen zum Stromverbrauch zu verwenden. Dieser bietet für den Nutzer den besten Anreiz, den Netzverkehr auf ein Minimum zu beschränken. Unabhängig davon, welche der derzeit bekannten Berechnungsmethoden verwendet wird, ist sie immer noch ungenau. Keine der derzeitigen Methoden kann die tatsächlichen Kosten für die Bereitstellung neuer Hardware für die Bandbreitenerhöhung zuverlässig voraussagen. Der zusätzliche CO2-Ausstoß fällt nämlich immer erst dann an, wenn neue Netzwerkkomponenten hinzugefügt werden, nicht wenn mehr Datenverkehr entsteht.

+

Betrachtete Netzwerkkomponente

+

Wichtig für das Verständnis und die Vergleichbarkeit der berechneten Werte ist, welche Teile des Netzwerks in die Betrachtung einbezogen werden. In der Abbildung rechts sehen Sie die einbezogenen Komponenten. Nur das Zugangsnetz und das IP-Kernnetz sind enthalten, d.h. die Verbindung zwischen Rechenzentren und Telekommunikationsanbietern.

+

Nicht enthalten sind die Teile innerhalb der Rechenzentren und auch keine Endnutzergeräte oder deren WLAN/LAN im Haus. Die Berechnung geht nur bis zur *Bordsteinkante*.

+

Ebenfalls wichtig: Nur Festnetzübertragungen sind hier enthalten. Kein mobiler Datenverkehr, der in der Regel mindestens um das 2-3fache energieintensiver ist.

+

Wert in Gigabytes

+
+
+
1
+
Gigabytes
+
+
x
+
+
0.06
+
kWh / GB
+
+
=
+
+
0.06
+
kWh
+
+
+

Wenn Ihr Wert in Megabytes angegeben ist, muss der Umrechnungsfaktor durch 1.000 geteilt werden und ergibt sich zu 0.00006 kWh / MB
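Zur Veranschaulichung eine kleine Python-Skizze mit den oben genannten Faktoren (die 442 gCO2e/kWh sind der weltweite Durchschnittswert aus dem nächsten Abschnitt):

```python
# Skizze: übertragene Datenmenge (GB) in kWh und dann in gCO2e umrechnen.
KWH_PRO_GB = 0.06           # Umrechnungsfaktor von dieser Seite
KWH_PRO_GB_2023 = 0.00375   # extrapolierter Faktor für 2023 (siehe Text)
NETZINTENSITAET = 442       # gCO2e/kWh, weltweiter Durchschnitt 2021

def transfer_zu_co2e(gigabytes, kwh_pro_gb=KWH_PRO_GB):
    """Geschätzte Gramm CO2e für die Übertragung von `gigabytes` Daten."""
    kwh = gigabytes * kwh_pro_gb
    return kwh * NETZINTENSITAET

print(transfer_zu_co2e(1))                   # 1 GB mit 0.06 kWh/GB -> 26.52 gCO2e
print(transfer_zu_co2e(1, KWH_PRO_GB_2023))  # mit dem 2023er-Faktor -> ca. 1.66 gCO2e
```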

+

[1] Untangling the estimates

+

[2] Daniel Schien, Paul Shabajee, Chris Preist. “Rethinking Allocation in High-Baseload Systems: A Demand-Proportional Network Electricity Intensity Metric.”

+
+ +
+ +

+ +

Diese Umrechnung ist wahrscheinlich die relevanteste.

+

Damit meinen wir, dass die Unternehmen bereits die aktuelle Intensität ihres Netzes kennen und die Arbeitsbelastung entsprechend planen wollen.

+

Die aktuelle Intensität des Stromnetzes kann z.B. über Electricitymap.com abgerufen werden. In unserem Fall, in Deutschland, ist der Wert 317 gCO2e/kWh.

+

Weltweite durchschnittliche Stromnetzintensität

+

Wenn Ihre Workload über mehrere Länder verteilt ist oder Sie überhaupt nicht wissen, wo Ihre Arbeitslast läuft, dann ist es am besten, den globalen Durchschnitt zu nehmen.

+

Für 2021 ist dieser Wert: 442 gCO2e/kWh

+

Setzt man diese Zahl in eine Berechnung ein, die mit kWh beginnt, kommt man direkt auf gCO2e, was Gramm CO2-Äquivalent bedeutet. Da nicht jeder chemische Prozess reines CO2 erzeugt, werden diese alle auf das äquivalente Klimaerwärmungspotenzial von CO2 umgerechnet, was zu CO2e führt

+
+
+
1
+
kWh
+
+
x
+
+
442
+
gCO2e/kWh
+
+
=
+
+
442
+
gCO2e
+
+
+

Quelle: Ember Climate

+
+ +
+ +

+

Einige Energie-Mess-Tools (wie z.B. Intel RAPL) geben *Joules* als Ausgabewert.

+

Der tatsächliche SI-Einheitswert von Joule ist Ws. Um also auf kWh zu kommen, muss man zuerst auf Stunden (60*60) kommen und dann auf *Kilo*, was bedeutet, dass man durch tausend dividieren muss

+

Joules zu kWh

+
+
+
1
+
Joule
+
+
/
+
+
(60*60)
+
(um Stunden zu bekommen)
+
+
/
+
+
(1.000)
+
(um *kilo* zu bekommen)
+
+
=
+
+
0.000000277...
+
kWh
+
+
+

Und umgekehrt:

+

kWh zu Joules

+
+
+
1
+
kWh
+
+
*
+
+
(60*60)
+
(Stunden auskürzen)
+
+
*
+
+
(1.000)
+
(*kilo* auskürzen)
+
+
=
+
+
3.600.000
+
Joules
+
+
+
+ +
+ +

+

Wenn Sie die Wattzahl nicht direkt messen können (mit einem externen Leistungsmesser, Intel RAPL usw.), können Sie immer Datenblätter oder Benchmarks verwenden und interpolieren, um Ihren Leistungswert zu ermitteln.

+

Ein naiver Ansatz zur Schätzung des Energieverbrauchs eines Prozessors wäre die Verwendung seiner Thermal Design Power (TDP).

+

Dies gibt Ihnen einen ersten Eindruck davon, in welcher Größenordnung Ihr erwarteter Energiewert liegt.

+

Beispiel: Der Intel i7-5557U hat einen TDP von 28 W

+

Wenn wir eine Berechnung von 5 Sekunden durchführen würden, würden wir einen Energieverbrauch von 140 Ws (28 W * 5 s), also 140 J, erwarten.

+

Wenn man sich eine CPU-Messung eines vollen CPU-Workloads anschaut, wird man sehen, dass der Wert real jedoch eher bei ~60 J liegt.

+

Daraus können wir schließen, dass die TDP eine sehr grobe Schätzung ist und als gute Obergrenze dient. Sie überschätzt die tatsächliche Energie jedoch um einiges.
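Die Überschlagsrechnung aus dem Text als kleine Python-Skizze (28 W TDP, 5 Sekunden Laufzeit; die ~60 J sind der im Text genannte reale Vergleichswert):

```python
# Naive Obergrenze: Energie = TDP * Laufzeit (Ws entspricht Joule).
TDP_WATT = 28            # TDP des Intel i7-5557U aus dem Beispiel
LAUFZEIT_SEKUNDEN = 5    # Dauer der Berechnung
GEMESSENE_JOULE = 60     # ungefährer realer Messwert laut Text

schaetzung_j = TDP_WATT * LAUFZEIT_SEKUNDEN   # 140 J Obergrenze
print(f"TDP-Schätzung: {schaetzung_j} J, gemessen: ~{GEMESSENE_JOULE} J")
print(f"Überschätzung: ca. {schaetzung_j / GEMESSENE_JOULE:.1f}x")
```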

+
+ +
+ +

+

Ein Benchmark ist eine weitere Möglichkeit, von der Laufzeit auf kWh zu gelangen.

+

TODO

+

Quelle: David Mytton

+
+ +{{< /rawhtml >}} diff --git a/content/co2-formulas.md b/content/co2-formulas.md index 6c4c042..4b1406a 100644 --- a/content/co2-formulas.md +++ b/content/co2-formulas.md @@ -82,7 +82,7 @@ On this page we want to hightlight how to get from typical values like **time**

By most often done we mean that companies already get the current intensity of their grid and schedule workloads accordingly.

The current intensity of the grid can for instance be retrieved from Electricitymap.com. In our case in Germany the number is at the moment 317 gCO2e/kWh

Worldwide average grid carbon intensity

-

If you can choose the number for the grid where your workload is running. If your workload is distributed over mutliple countries or you don't know at all where your workload is running, then the best way is to take the global average.

+

If your workload is distributed over multiple countries or you don't know at all where your workload is running, then the best way is to take the global average.

For 2021 this number is: 442 gCO2e/kWh

So if we plug this number into a calculation starting with kWh we get directly to gCO2e, which means grams of CO2 equivalent. Since not every chemical process generates pure CO2, they are all converted to the equivalent climate warming potential of CO2, which results in CO2e.
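As a quick illustrative sketch in Python (the 442 gCO2e/kWh world average and the 317 gCO2e/kWh German value are the figures quoted above; substitute your own grid intensity where known):

```python
# Sketch: energy in kWh multiplied by the grid carbon intensity gives gCO2e.
WORLD_AVG_2021 = 442   # gCO2e per kWh, world average for 2021
GERMANY_NOW = 317      # gCO2e per kWh, German grid value quoted above

def kwh_to_gco2e(kwh: float, grid_intensity: float = WORLD_AVG_2021) -> float:
    """Convert an energy amount in kWh to grams of CO2 equivalent."""
    return kwh * grid_intensity

print(kwh_to_gco2e(1))               # 442 gCO2e with the world average
print(kwh_to_gco2e(1, GERMANY_NOW))  # 317 gCO2e with the German grid
```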

@@ -111,10 +111,9 @@ On this page we want to hightlight how to get from typical values like **time**

-

Some energy budgeting tools or internal power meters (like Intel RAPL) give you Joules as an output value.

-

Measuring energy directly through an electrical circuit is usually the premium category, so this value is the best you can get.

+

Some energy budgeting tools or internal power meters (like Intel RAPL) give you *Joules* as an output value.

The actual SI unit behind Joules is Ws (watt-seconds). So in order to get to kWh you first have to get to hours (divide by 60*60) and then to *kilo*, which means dividing by a thousand.

-

Joules to kWh

+

Joules to kWh

1
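A minimal sketch of the conversion described above (1 J = 1 Ws, so divide by 3600 seconds per hour and then by 1000 for *kilo*):

```python
# Sketch: convert between Joules (watt-seconds) and kWh.
def joules_to_kwh(joules: float) -> float:
    """1 J = 1 Ws; divide by 3600 s/h to get Wh, then by 1000 to get kWh."""
    return joules / (60 * 60) / 1000

def kwh_to_joules(kwh: float) -> float:
    """Inverse conversion: 1 kWh = 3,600,000 J."""
    return kwh * (60 * 60) * 1000

print(joules_to_kwh(1))   # ~0.000000277 kWh
print(kwh_to_joules(1))   # 3600000.0 J
```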
diff --git a/content/jobs/green-software-developer.md b/content/jobs/green-software-developer.md index 35ef66f..6c0ad7a 100644 --- a/content/jobs/green-software-developer.md +++ b/content/jobs/green-software-developer.md @@ -1,9 +1,9 @@ --- title: "Green Software Developer" -description: "Open role for Green Software Developer with 5+ years experience at Green Coding Berlin GmbH to work on sustainable software." +description: "Open role for Green Software Developer with 5+ years experience at Green Coding Solutions GmbH to work on sustainable software." --- -At Green Coding Berlin we create a green software community and ecosystem in Berlin. +At Green Coding Solutions we create a green software community and ecosystem in Berlin. Our goal is to shape the industry for the better in terms of using software in a sustainable way. We strongly believe in the positive impact of digitalization. But we believe it must be done responsibly with awareness of consumption and with strong planning to avoid rebound effects. @@ -32,7 +32,7 @@ We also currently work on CO2 emission reporters for different CI-Pipelines as w ### How your typical week looks like -When working at Green Coding Berlin GmbH as an engineer, the work is quite different from traditional coding jobs. +When working at Green Coding Solutions GmbH as an engineer, the work is quite different from traditional coding jobs. Working in CO2 measurement of software is currently still a very academic profession. Therefore your weekly work is typically divided into reseach (reading Computer Science papers, articles and proof-of-concept code) and writing prototypes, tests and tools. diff --git a/content/jobs/junior-green-software-developer.md b/content/jobs/junior-green-software-developer.md index 071465e..8694748 100644 --- a/content/jobs/junior-green-software-developer.md +++ b/content/jobs/junior-green-software-developer.md @@ -1,10 +1,10 @@ --- title: "Junior Green Software Developer" -description: "Open role for Junior Green Software Developer at Green Coding Berlin GmbH to work on sustainable software." +description: "Open role for Junior Green Software Developer at Green Coding Solutions GmbH to work on sustainable software." draft: true --- -At Green Coding Berlin we create a green software community and ecosystem in Berlin. +At Green Coding Solutions we create a green software community and ecosystem in Berlin. Our goal is to shape the industry for the better in terms of using software in a sustainable way. We strongly believe in the positive impact of digitalization. But we believe it must be done responsibly with awareness of consumption and @@ -29,7 +29,7 @@ See examples in our [Case Studies](https://www.green-coding.io/case-studies), ou ### How your typical week looks like -When working at Green Coding Berlin GmbH as an engineer, the work is quite different from traditional coding jobs. +When working at Green Coding Solutions GmbH as an engineer, the work is quite different from traditional coding jobs. Working in CO2 measurement of software is currently still a very academic profession. Therefore our team does a lot of research and writing prototypes, tests and tools. diff --git a/content/projects/cloud-energy.de.md b/content/projects/cloud-energy.de.md new file mode 100644 index 0000000..ebaff6d --- /dev/null +++ b/content/projects/cloud-energy.de.md @@ -0,0 +1,46 @@ +--- +title: "Cloud Energy" +date: 2023-01-13 19:00:00 +publishDate: 2023-01-13 +draft: false +icon: "cloud" +--- + + +{{< rawhtml >}} +XGBoost performance +
+{{}} + +Da es in der Cloud oft nicht möglich ist, Energie direkt zu messen, haben wir ein Machine Learning Modell erstellt, basierend auf Daten von [SPECPower](https://www.spec.org/power_ssj2008/). + +Die Basis des Modells basiert auf einem [Forschungspapier](https://interactdc.com/static/images/documents/Elsevier_Journal.pdf) von [Interact DC](https://interactdc.com/) und der University of East London. + +Unser Modell ermöglicht die Inline-Messung in Watt sowie die Energiebudgetierung in Joule mit vielen optionalen Eingabeparametern, um das Modell genauer zu machen. + +In der Grafik rechts sehen Sie die Leistung für eine Out-of-Sample-Vorhersage. Weitere Details zu In-Sample-Vorhersagen, explorativer Datenanalyse und Anwendungsdokumentation finden Sie auf Github. + +Das Modell ist Open-Source und unter der [AGPLv3-Lizenz](https://github.com/green-coding-berlin/green-metrics-tool/blob/main/LICENSE) lizenziert. + +--- +{{< button "code branch" "Github incl. Dokumentation" "https://github.com/green-coding-berlin/spec-power-model" >}} + +  + + +``` +$ ./static-binary | python3 xgb.py --make intel -- cpu-freq 2600 --ram 7 --cpu-threads 24 +191.939294374113 +169.99632303510703 +191.939294374113 +191.939294374113 +191.939294374113 +191.939294374113 +194.37740205685841 +191.939294374113 +169.99632303510703 +191.939294374113 +.... +``` + +--- diff --git a/content/projects/cloud-energy.md b/content/projects/cloud-energy.md index 6153c5e..f5ebb97 100644 --- a/content/projects/cloud-energy.md +++ b/content/projects/cloud-energy.md @@ -13,8 +13,6 @@ icon: "cloud" {{}} - - Since in the cloud it is often not possible to measure energy directly we have created a Machine Learning estimation model based on the data from [SPECPower](https://www.spec.org/power_ssj2008/) @@ -30,19 +28,12 @@ The model is open-source [AGPLv3 Licensed](https://github.com/green-coding-berli --- -{{< rawhtml >}} - -
- -
- - Github incl. documentation - -
-{{< /rawhtml >}} +{{< button "code branch" "Github incl. documentation" "https://github.com/green-coding-berlin/spec-power-model" >}} + +  ``` -$ ./static-binary | python3 xgb.py --make intel -- cpu-freq 2600 --ram 7 --cpu-threads 24 +$ ./static-binary | python3 xgb.py --make intel -- cpu-freq 2600 --ram 7 --cpu-threads 24 191.939294374113 169.99632303510703 191.939294374113 diff --git a/content/projects/eco-ci.de.md b/content/projects/eco-ci.de.md new file mode 100644 index 0000000..437e32e --- /dev/null +++ b/content/projects/eco-ci.de.md @@ -0,0 +1,89 @@ +--- +title: "Eco CI" +date: 2023-01-15 19:00:00 +publishDate: 2023-01-15 +draft: false +icon: "leaf" +--- + +Eco CI ist der interne Name für ein Projekt, in dem wir kleine Tools entwickeln, um CI-Pipelines in Bezug auf ihren Energieverbrauch transparenter zu machen und auch kleine Tools zu entwickeln, die Energie/CO2 in der Pipeline einsparen können. + +Unsere aktuelle Arbeit konzentriert sich auf Github Actions, eine der bisher größten kostenlosen Plattformen für kontinuierliche Integration. + +Durch die Integration unserer maßgeschneiderten Github Actions und Github Apps in Ihren Test-Workflow erhalten Sie eine automatische Abschätzung der Energiekosten des Workflow-Laufs. + +## Github Actions - Energieabschätzung + +Github Actions laufen auf Microsoft Azure VMs. In diesen VMs ist eine direkte Messung mit etwas wie RAPL leider nicht möglich. + +Wir nutzen unsere Arbeit aus dem [Cloud Energy](/de/projects/cloud-energy) Projekt, um den Energieverbrauch dieser Azure-VMs zu schätzen. + +Das Ergebnis ist eine leicht integrierbare Github-Aktion, mit der Sie die Energie in Joule für den CI-Lauf erhalten. Es ist auch möglich, nur Ergebnisse für einen Teil des CI-Laufs zu erhalten. + +{{< rawhtml >}} +
+ +
Github Aktionen Energieabschätzung
+
+{{< /rawhtml >}} + + +{{< rawhtml >}} +
+ +
Demo der Eco-CI-Funktion im Green Metrics Tool
+
+{{< /rawhtml >}} + + +{{< button "code branch" "Github" "https://github.com/green-coding-berlin/eco-ci-energy-estimation" >}} + +{{< button "shopping bag" "Github-Marktplatz" "https://github.com/marketplace/actions/eco-ci-energy-estimation" >}} + +{{< button "eye" "Live-Vorschau" "https://github.com/green-coding-berlin/green-metrics-tool/actions/runs/4720202654" >}} + +{{< button "chartline" "CI Energiekostenüberwachung" "https://metrics.green-coding.io/ci.html?repo=green-coding-berlin%2Fgreen-metrics-tool&branch=dev&workflow=45267392" >}} + + +  + +--- + +## Github Aktion - Eco CI Activity Checker + +Der Eco CI Activity Checker wurde für CI-Workflows entwickelt, die nach einem bestimmten Zeitplan ablaufen. + +Oftmals werden diese Workflows auch dann ausgeführt, wenn in den letzten z.B. 24 Stunden kein einziger Commit stattgefunden hat, oder auch wenn +ein paar Minuten vor dem Lauf ein manueller Lauf ausgelöst wurde. + +Der Eco CI Activity Checker überspringt dann den Testlauf und spart so Energie und CO2. + +{{< button "book" "Blog-Artikel" "/blog/eco-ci-activity-checker-released/" >}} + +{{< button "code branch" "Github Repository" "https://github.com/green-coding-berlin/eco-ci-activity-checker" >}} + +{{< button "shopping bag" "Github Marktplatz" "https://github.com/marketplace/actions/eco-ci-activity-checker" >}} + +  + +--- + +## Gitlab Plugin + +Bitte beachten Sie für Gitlab unsere zentrale [Github repository Dokumentation] (https://github.com/green-coding-berlin/eco-ci-energy-estimation/blob/main/README.md#gitlab), die alles über die Gitlab-Funktionalität beinhaltet. + +{{< button "book" "Blog arcticle" "/blog/eco-ci-gitlab-release/" >}} + +{{< button "code branch" " Github Repository (für Gitlab Dokumentation)" "https://github.com/green-coding-berlin/eco-ci-energy-estimation/blob/main/README.md#gitlab" >}} + +  + +--- + +## Wie geht es mit dem Eco-CI-Projekt weiter? + +Eine weitere Einsparung kann durch die Umplanung von Arbeit, die nicht zeitabhängig ist, auf Zeiten wo das Stromnetzes "grüner" ist erzielt werden. Dies ist eine geplante Funktion, die bald als separate Aktion entwickelt wird. + +Auch hängende und im Leerlauf befindliche VMs werden derzeit in einem unserer Projekte angegangen, um sie abzuschalten und die Verschwendung von Leerlaufstrom zu stoppen + +Schauen Sie doch auch gerne in unseren [Blog](/blog) um auf dem Laufenden zu bleiben. diff --git a/content/projects/eco-ci.md b/content/projects/eco-ci.md index ce22a41..f554d5b 100644 --- a/content/projects/eco-ci.md +++ b/content/projects/eco-ci.md @@ -11,7 +11,7 @@ in terms of their energy usage and also develop small tools that can save energy Our current work focuses on Github Actions, one of the biggest free continous integration platforms to date. -By integrating our custom Github Actions and Github Apps into your testing workflow you get an automated estimation about the +By integrating our custom Github Actions and Github Apps into your testing workflow you get an automated estimation about the energy cost of the workflow run. ## Github Actions - Energy estimation @@ -20,7 +20,7 @@ Github Actions runs on Microsoft Azure VMs. In these VMs are direct measurement We are using our work from our [Cloud Energy project](/projects/cloud-energy) to estimate the energy used by these Azure VMs. -The result is an easily integrateable Github Action where you get the energy in Joules for the CI run. It is +The result is an easily integrateable Github Action where you get the energy in Joules for the CI run. 
It is also possible to get only results for part of the CI run. @@ -39,86 +39,32 @@ also possible to get only results for part of the CI run. {{< /rawhtml >}} +{{< button "code branch" "Github Repository" "https://github.com/green-coding-berlin/eco-ci-energy-estimation" >}} - - -{{< rawhtml >}} - -
- -
- - Github Repository - -
- -
- -
- - Github Marketplace - -
- -
- -
- - Live preview on Github Actions - -
- -
- -
- - Our CI Energy Cost monitoring - -
+{{< button "shopping bag" " Github Marketplace" "https://github.com/marketplace/actions/eco-ci-energy-estimation" >}} +{{< button "eye" "Live preview on Github Actions" "https://github.com/green-coding-berlin/green-metrics-tool/actions/runs/4720202654" >}} -{{< /rawhtml >}} +{{< button "chartline" "CI Energy Cost monitoring" "https://metrics.green-coding.io/ci.html?repo=green-coding-berlin%2Fgreen-metrics-tool&branch=dev&workflow=45267392" >}}   ---- +--- ## Github Action - Eco CI Activity Checker The Eco CI Activity Checker was designed for CI workflows that run on a scheduled basis. -Often these run happen even if there was not even a single commit in the last ex. 24 hours, or even if there +Often these run happen even if there was not even a single commit in the last ex. 24 hours, or even if there was a manual run triggered just a couple minutes before the run. The Eco CI Activity checker skips the test run then and thus saves energy and CO2. -{{< rawhtml >}} - -
- -
- - Blog arcticle - -
- -
- -
- - Github Repository - -
- -
- -
- - Github Marketplace - -
-{{< /rawhtml >}} +{{< button "book" "Blog article" "/blog/eco-ci-activity-checker-released/" >}} + +{{< button "code branch" "Github Repository" "https://github.com/green-coding-berlin/eco-ci-activity-checker" >}} + +{{< button "shopping bag" "Github Marketplace" "https://github.com/marketplace/actions/eco-ci-activity-checker" >}}   @@ -128,24 +74,9 @@ The Eco CI Activity checker skips the test run then and thus saves energy and CO Please for Gitlab our main central [Github repository documentation](https://github.com/green-coding-berlin/eco-ci-energy-estimation/blob/main/README.md#gitlab) which talks about the Gitlab functionality. -{{< rawhtml >}} - -
- -
- - Blog arcticle - -
- -
- -
- - Github Repository (for Gitlab documentation) - -
-{{< /rawhtml >}} +{{< button "book" "Blog article" "/blog/eco-ci-gitlab-release/" >}} + +{{< button "code branch" "Github Repository (for Gitlab documentation)" "https://github.com/green-coding-berlin/eco-ci-energy-estimation/blob/main/README.md#gitlab" >}}   @@ -155,8 +86,7 @@ Please for Gitlab our main central [Github repository documentation](https://git A second savings can be had by re-scheuling jobs that are not time-sensitive to times when the electric grid. This is a planned feature that will be devloped soon as a seperate action. -Also hanging and idling VMs are currently tackled in on of our projects to turn them down and stop wasting idle power +Also hanging and idling VMs are currently tackled in on of our projects to turn them down and stop wasting idle power Cloud VMs. Stay tuned on our [blog](/blog) also for updates on this project! - diff --git a/content/projects/energy-id-projects.html b/content/projects/energy-id-projects.html new file mode 100644 index 0000000..912fff0 --- /dev/null +++ b/content/projects/energy-id-projects.html @@ -0,0 +1,445 @@ + + + + + + + + + diff --git a/content/projects/energy-id.de.md b/content/projects/energy-id.de.md new file mode 100644 index 0000000..7e5020a --- /dev/null +++ b/content/projects/energy-id.de.md @@ -0,0 +1,30 @@ +--- +title: "Energy ID" +date: 2023-08-02 10:00:00 +publishDate: 2023-08-02 +draft: false +icon: "portrait" +--- + +Im Energy-ID-Projekt untersuchen wir populäre Open-Source-Software und vergleichen ihren Energie- und CO2-Verbrauch sowie die SCI-Metrik der [Green Software Foundation](https://sci-guide.greensoftware.foundation/) mit Hilfe unseres [Green Metrics Tool](/de/projects/green-metrics-tool/) + +Energy ID erstellt eine *Scorekarte* mit einer: + +- Beschreibung +- Benchmarking- / Nutzungs-Szenario +- Detaillierten Links zur Messung +- Badges + +Die Badges können in Github-Projekten oder ähnlichem verwendet werden und basieren auf unserem [OpenEnergyBadge](/de/projects/open-energy-badge/) Projekt. + +Das Projekt zielt darauf ab, einen schnellen Überblick über den "typischen Anwendungsfall einer Software" zu bekommen und wie viel Energie-/CO2 dabei anfallen würde. + +Wenn Sie ein Projekt weitergehend vergleichen, über einen längeren Zeitraum überwachen oder sogar Optimierungen und Performance-Engineering durchführen möchten, sollten Sie sich die Möglichkeiten des [Green Metrics Tool](/de/projects/green-metrics-tool/) ansehen. + +**Wichtig:** Die hier angegebenen absoluten Zahlen sind nicht als Grundlage für des tatsächlichen CO2 Ausstoß zu sehen. Es handelt sich um den Verbrauch der auf unseren Testmaschinen entstehen und spielgt under gewähltes Szenario wieder. Ein anderer Anwendungsfall könnte ganz andere Werte verursachen. + +Für Messungen auf anderen Rechnern, die Ihrem Setup ähnlicher sind, besuchen Sie unsere [Cluster-Dokumentation](https://docs.green-coding.io/docs/measuring/measurement-cluster/) und führen Sie die Messung auf einem für Ihren Vergleich besser geeigneten Rechner erneut durch. 
+ +# Open Source Projects +--- +{{< include-file "content/projects/energy-id-projects.html" >}} diff --git a/content/projects/energy-id.md b/content/projects/energy-id.md index bdecd28..9eb6fdd 100644 --- a/content/projects/energy-id.md +++ b/content/projects/energy-id.md @@ -20,7 +20,7 @@ The projects aim is to get a quick idea about the "typical use case of a softwar If you want to further compare a project, monitor a project over time or even do optimizations and performance engineering do look into the capabilities of the [Green Metrics Tool](projects/green-metrics-tool/) -**Important:** The absolute numbers shown here are not to be taken as ground truth of the actual carbon cost of the +**Important:** The absolute numbers shown here are not to be taken as ground truth of the actual carbon cost of the software in the wild. These are the cost that happen on our testing machines and also reflect the cost of the scenario chosen by us. A different use-case might incur a vastly different carbon cost. @@ -29,458 +29,4 @@ and re-run the measurement on a machine more apt for your comparison. # Open Source Projects --- - - - - -{{< rawhtml>}} - - - - - - - - -{{}} - - -{{< rawhtml >}} - -{{}} \ No newline at end of file +{{< include-file "content/projects/energy-id-projects.html" >}} diff --git a/content/projects/energy-timeline.md b/content/projects/energy-timeline.md new file mode 100644 index 0000000..7ac4d22 --- /dev/null +++ b/content/projects/energy-timeline.md @@ -0,0 +1,65 @@ +--- +title: "Energy Timeline" +date: 2023-08-02 10:00:00 +publishDate: 2023-08-02 +draft: true +icon: "history" +--- + +Have you ever wondered if your software was not way quicker and responsive a year ago? + +And if you think about that, wouldn't it be interesting to see if the energy consumption has changed alongside with it? +Because you might be using the same features, but now they have become more costly ... + +This effect is called software bloat + +{{< rawhtml >}} +XGBoost performance +
+{{}} + + +Since in the cloud it is often not possible to measure energy directly we have created a Machine Learning estimation model +based on the data from [SPECPower](https://www.spec.org/power_ssj2008/) + +The setup of the model is based on a [research paper](https://interactdc.com/static/images/documents/Elsevier_Journal.pdf) from [Interact DC](https://interactdc.com/) and the University of East London. + +Our model allows for inline measuring in Watts as well as energy budgeting in Joules with many optional input +params to make the model more accurate. + +In the chart on the right you can see the performance for an out-of-sample prediciton. Please find more details +for in-sample predictions, exploratory data analysis and application documentation on Github. + +The model is open-source [AGPLv3 Licensed](https://github.com/green-coding-berlin/green-metrics-tool/blob/main/LICENSE) + +--- + +{{< rawhtml >}} + +
+ +
+ + Github incl. documentation + +
+{{< /rawhtml >}} + +``` +$ ./static-binary | python3 xgb.py --make intel -- cpu-freq 2600 --ram 7 --cpu-threads 24 +191.939294374113 +169.99632303510703 +191.939294374113 +191.939294374113 +191.939294374113 +191.939294374113 +194.37740205685841 +191.939294374113 +169.99632303510703 +191.939294374113 +.... +``` + +--- + + diff --git a/content/projects/green-metrics-tool.de.md b/content/projects/green-metrics-tool.de.md new file mode 100644 index 0000000..05b6ba6 --- /dev/null +++ b/content/projects/green-metrics-tool.de.md @@ -0,0 +1,45 @@ +--- +title: "Green Metrics Tool" +date: 2023-08-03 19:00:00 +publishDate: 2023-01-16 +draft: false +projectimg: "/img/projects/gmt-rapl-graph.webp" +icon: "code" +--- + +Das Green Metrics Tool ist ein freies Open-Source (FOSS) Tool, das wir entwickelt haben um EntwicklerInnen am besten dabei zu unterstützen, den Energie- / CO2-Verbrauch von Software-Architekturen zu messen. + +Unser Tool ist quelloffen [AGPLv3 lizenziert](https://github.com/green-coding-berlin/green-metrics-tool/blob/main/LICENSE) + +Sie können es entweder lokal installieren oder unser Demo-Cluster verwenden, wo wir auch die Möglichkeit bieten, Software zur Messung einzureichen [Link](https://metrics.green-coding.io/request.html). + +Bitte lesen Sie alle Details auf Github und in der Dokumentation. + +{{< button "book" "Dokumentation" "https://docs.green-coding.io" >}} + +{{< button "code branch" "Github" "https://github.com/green-coding-berlin/green-metrics-tool" >}} + +{{< button "columns" "Demo Dashboard" "https://metrics.green-coding.io" >}} + +--- + +## Screenshots der Einzelmessungen + +{{< rawhtml >}} +GMT Screenshot +
+GMT screenshot +
+GMT screenshot +
+GMT screenshot +
+{{< /rawhtml >}} + +## Screenshots der Vergleichsansicht +{{< rawhtml >}} +GMT screenshot +
+GMT screenshot +
+{{< /rawhtml >}} diff --git a/content/projects/green-metrics-tool.md b/content/projects/green-metrics-tool.md index ee9e383..6b88747 100644 --- a/content/projects/green-metrics-tool.md +++ b/content/projects/green-metrics-tool.md @@ -7,7 +7,7 @@ projectimg: "/img/projects/gmt-rapl-graph.webp" icon: "code" --- -The Green Metrics Tool is a free open-source (FOSS) tool we designed as a concept on how we believe it can support +The Green Metrics Tool is a free open-source (FOSS) tool we designed as a concept on how we believe it can support developers best to measure the energy / CO2 consumption of software architectures. Our tool is open-source [AGPLv3 Licensed](https://github.com/green-coding-berlin/green-metrics-tool/blob/main/LICENSE) @@ -16,38 +16,18 @@ You can either install it locally or use our demo dashboard, where we also offer Please see all the details on Github and in the documentation. -{{< rawhtml >}} - -
- -
- - Documentation - -
- -
- -
- - Github - -
- -
- -
- - Demo Dashboard - -
-{{< /rawhtml >}} +{{< button "book" "Documentation" "https://docs.green-coding.io" >}} + +{{< button "code branch" "Github" "https://github.com/green-coding-berlin/green-metrics-tool" >}} + +{{< button "columns" "Demo Dashboard" "https://metrics.green-coding.io" >}} + --- ## Screenshots of the Single Run View -{{< rawhtml >}} +{{< rawhtml >}} GMT Screenshot
GMT screenshot @@ -59,7 +39,7 @@ Please see all the details on Github and in the documentation. {{< /rawhtml >}} ## Screenhots of the comparison view -{{< rawhtml >}} +{{< rawhtml >}} GMT screenshot
GMT screenshot diff --git a/content/projects/open-energy-badge.de.md b/content/projects/open-energy-badge.de.md new file mode 100644 index 0000000..c8466a2 --- /dev/null +++ b/content/projects/open-energy-badge.de.md @@ -0,0 +1,28 @@ +--- +title: "OpenEnergyBadge" +date: 2023-01-14 19:00:00 +publishDate: 2023-01-14 +draft: false +projectimg: "/img/projects/open-energy-badge.webp" +icon: "battery three quarters" + +--- + +{{< rawhtml >}} +GMT Screenshot +
+{{}} + +Das **Open Energy Badge** ist ein Projekt, bei dem wir ein neues Badge für Github Repositories vorschlagen, das über die Kosten von: + +- die Durchführung der Tests im Projekt +- Wie viel eine bestimmte Aktivität kostet (z.B. die Kosten für einen API-Aufruf) +- Wie viel die Erstellung der statischen Website kostet +- usw. + +Sie werden automatisch von unserem [Green Metrics Tool](/de/projects/green-metrics-tool) erstellt und können frei verwendet werden. + +Die erstellten Energiemetriken gibt es in verschiedenen Metrik-Typen, je nachdem, ob Sie sie lokal auf Ihrem Entwicklungssystem getestet haben +oder auf unserer [gehosteten Demo-Instanz] (https://metrics.green-coding.io) mit der Option, Software zur Messung einzureichen. + +Sehen Sie den Badge in Aktion, zum Beispiel, in unserem [Dokumentation Repository](https://github.com/green-coding-berlin/documentation) an, wo Sie direkt sehen können, wie hoch die Erstellungskosten der statischen Seiten waren. diff --git a/content/projects/open-energy-badge.md b/content/projects/open-energy-badge.md index d64bea4..4f1d216 100644 --- a/content/projects/open-energy-badge.md +++ b/content/projects/open-energy-badge.md @@ -15,6 +15,7 @@ icon: "battery three quarters" The **Open Energy Badge** is a project where we propose a new badge for Github Repositories that informs about the cost of: + - Running the tests in the project - How much a certain activity costs (ex. the cost of an API call) - How much building the static website costs @@ -23,7 +24,7 @@ of: It is created automatically by our [Green Metrics Tool](/projects/green-metrics-tool) and can freely be used. The energy metrics created come in different metric types, depending on if you tested it locally on your dev system -or on our [hosted demo instance](https://metrics.green-coding.io) with option to submit software for measuring. +or on our [hosted demo instance](https://metrics.green-coding.io) with option to submit software for measuring. See the badge in action for instance on our [documentation repository](https://github.com/green-coding-berlin/documentation) where you can directly see how much the building costs of the static pages where. \ No newline at end of file diff --git a/content/projects/power-hog.de.md b/content/projects/power-hog.de.md new file mode 100644 index 0000000..4d34f4f --- /dev/null +++ b/content/projects/power-hog.de.md @@ -0,0 +1,52 @@ +--- +title: "Power Hog" +date: 2023-9-20 19:00:00 +publishDate: 2023-01-16 +draft: false +icon: "piggy bank" +--- + +Der Power Hog bietet eine Reihe von Tools, mit denen Sie den Energieverbrauch Ihres Computers überwachen können. +Derzeit sind die drei Hauptziele: + +1) Der Benutzerin die Möglichkeit geben, zu analysieren, welche Prozesse wie viel Energie verbrauchen und wie sich dies auf das System auswirkt. +2) Zentrale Erfassung der Stromverbrauchsdaten, um Anwendungen/Prozesse zu identifizieren, die optimiert werden könnten, um auf breiterer Ebene Energie zu sparen. +3) Der Nutzerin die Möglichkeit geben, die Daten an eine CO2 Datenbank zu übermitteln, damit der Energieverbrauch einem Projekt zugeordnet werden kann. + +Derzeit unterstützen wir nur MacOSX durch das powermetrics [[1]](https://www.green-coding.io/blog/power-measurement-on-macos/) Tool! +Für die Unterstützung von Linux oder Windows ist noch mehr Arbeit nötig. + +Der Hog besteht aus zwei Hauptteilen. + +Das Hintergrundskript, das alle Daten sammelt und sie an einen Server sendet. Dies wird `power_logger` genannt. 
Mehr +Informationen können hier gefunden werden: + +[https://github.com/green-coding-berlin/hog#power-logger](https://github.com/green-coding-berlin/hog#power-logger) + +Die App, die Ihnen erste Einblicke in die gesammelten Daten und weitere Informationen über den Hog auf Ihrem System gibt. Einzelheiten +finden Sie hier: + +[https://github.com/green-coding-berlin/hog#the-desktop-app](https://github.com/green-coding-berlin/hog#the-desktop-app) + +Detaillierte Analysen können im Dashboard des Green Metrics Tool durchgeführt werden. Sie können entweder unseren Server oder Ihren eigenen Host verwenden. + +Eine vollständige Readme und Dokumentation finden Sie im GitHub Repo. + +{{< button "arrow alternate circle down" "Download" "https://github.com/green-coding-berlin/hog/releases" >}} + +{{< button "book" "Dokumentation" "https://github.com/green-coding-berlin/hog/blob/main/README.md#the-power-hog" >}} + +{{< button "code branch" "Github" "https://github.com/green-coding-berlin/hog/" >}} + +--- + +## Screenshots + +{{< rawhtml >}} +HOG Screenshot +
+HOG Screenshot +
+HOG Screenshot +
+{{< /rawhtml >}} diff --git a/content/services.de.md b/content/services.de.md new file mode 100644 index 0000000..d59bd33 --- /dev/null +++ b/content/services.de.md @@ -0,0 +1,147 @@ +--- +title: "Warum für uns entscheiden?" +draft: false +description: "Sehen Sie sich unsere Dienstleistungen an, mit denen Sie Ihren Software-Stack umweltfreundlicher gestalten können" +date: 2023-09-14 08:00:00 +author: "Arne Tarara" +authorlink: "https://de.linkedin.com/in/arne-tarara" + +--- + +Möchten Sie die Software und IT nachhaltig optimiren in Bezug auf Energie und CO2-Emissionen? Ganz gleich, ob Sie diese Transformation gerade erst beginnen oder ob Sie die Emissionen weiter optimieren und reduzieren möchten, wir können Ihnen helfen! + +Unser Team besteht aus erfahrenen Software-Performance- und Pipeline-Ingenieuren, die sich inzwischen auf nachhaltige Software-Entwicklung spezialisiert haben. + +Wir sind [regelmäßige Sprecher auf Konferenzen und Branchensymposien](/de/#nav-meetups), entwickeln +[Projekte](/de/#projects) sowie [Open-Source-Tools zur Messung von CO2 in Containern](/de/projects/green-metrics-tool) und aktive Befürworter und Berater für Energie- und CO2 transparenz [State of the art carbon pipeline tooling](/de/projects/eco-ci) +durch Zertifizierungen wie den [Blauen Engel für Software](https://www.blauer-engel.de/de/produktwelt/ressourcen-und-energieeffiziente-softwareprodukte) oder unser Projekt [Energy-ID](/de/projects/energy-id). + +Einige unserer jüngsten anerkannten Arbeiten in der Forschung umfassen +[maßgeschneiderte Open-Source-Maschinenmodelle zur Energieabschätzung in Cloud-Umgebungen](/de/projects/cloud-energy), veröffentlichte Positionspapiere +Papiere und akademische Papiere für Software-Lebenszyklus-Bewertung und Software-Energiemessung, Arbeit an +[Software-Lifecycle-Assessment](https://publication2023.bits-und-baeume.org/#book/38) mit Entwickler-Tools und zitierte +[Fallstudien für Energieeinsparungen und -optimierungen](/case-studies). + + +{{< greenblock >}} +Dienstleistungen +{{}} + +{{< rawhtml >}} +
+
+
+
Forschung & Entwicklung
+ +

Weitere Beispiele anzeigen ...

+
+
+
Workshops und Schulungen
+ +

Weitere Beispiele anzeigen ...

+
+
+
Messung & Optimierung
+ +

Weitere Beispiele anzeigen ...

+
+
+
Beratung
+ +

Weitere Beispiele anzeigen ...

+
+
+
Brauchen Sie noch etwas?
+ Lassen Sie uns reden! +
+
+
+ +{{}} + +{{< whiteblock >}} +{{}} + + +{{< include-partial "references" >}} + + +{{< greenblock >}} +Interessiert? +{{}} + +{{< rawhtml >}} + +
+
+

Kontaktieren Sie uns

+

Green Coding Solutions GmbH
+ Jablonskistr. 24
+ 10405 Berlin (Germany)

+

(+49) 160 947 930 24

+

info@green-coding.io

+
+ +
+
+
+
+
+
+
+
+ + + + +
+
+ + +
+
+ +
+ +
+
+
+
+
+{{}} + + diff --git a/content/services.md b/content/services.md index 2984f23..e9b8a6f 100644 --- a/content/services.md +++ b/content/services.md @@ -10,18 +10,18 @@ authorlink: "https://de.linkedin.com/in/arne-tarara" Are you keen on enhancing the transparency of your software in terms of energy and CO2 emissions? Whether you're starting this journey or looking to further optimize and reduce emissions, we can assist! -Our team consists of senior software performance and pipeline engineers that have since specialized in sustainable +Our team consists of senior software performance and pipeline engineers that have since specialized in sustainable software engineering. -We are [regular speaker at conferences and industry symposiums](/#nav-meetups), developing -[open source industry advancing container native carbon measurement tools](/projects/green-metrics-tool) as well as -[state of the art carbon pipeline tooling](/projects/eco-ci), active advocates and consultants for energy and carbon transparency +We are [regular speaker at conferences and industry symposiums](/#nav-meetups), developing +[open source industry advancing container native carbon measurement tools](/projects/green-metrics-tool) as well as +[state of the art carbon pipeline tooling](/projects/eco-ci), active advocates and consultants for energy and carbon transparency through certifications like the [Blauer Engel für Software](https://www.blauer-engel.de/de/produktwelt/ressourcen-und-energieeffiziente-softwareprodukte) or our [Energy-ID](/projects/energy-id) project. -Some of our recent recognized works in research include -[tailored open source machine models for energy estimation in cloud environments](/projects/cloud-energy), published positional -papers and academical papers for software lifecycle assessment and software energy measurement, work on -[Software-Lifecycle-Assessment](https://publication2023.bits-und-baeume.org/#book/38) with developer tools to be released later this year and cited +Some of our recent recognized works in research include +[tailored open source machine models for energy estimation in cloud environments](/projects/cloud-energy), published positional +papers and academical papers for software lifecycle assessment and software energy measurement, work on +[Software-Lifecycle-Assessment](https://publication2023.bits-und-baeume.org/#book/38) with developer tools to be released later this year and cited [case studies for energy savings and optimizations](/case-studies). @@ -34,7 +34,7 @@ Services
Research & Development
-
Workshops & Training
@@ -72,7 +72,7 @@ Services
  • Website carbon certifications
  • -

    Show more examples ...

    +

    Show more examples ...

    Consulting
    @@ -84,7 +84,7 @@ Services
  • Green software development guidelines
  • -

    Show more examples ...

    +

    Show more examples ...

    Need something else?
    @@ -122,7 +122,7 @@ Interested?

    Contact us

    -

    Green Coding Berlin GmbH
    +

    Green Coding Solutions GmbH
    Jablonskistr. 24
    10405 Berlin (Germany)

    (+49) 160 947 930 24

    @@ -134,14 +134,14 @@ Interested?
    -
    +
    -
    - +
    + - + -
    +
    @@ -150,7 +150,7 @@ Interested?
    -
    +
    diff --git a/content/talks-and-events/adesso-2023-12.de.md b/content/talks-and-events/adesso-2023-12.de.md new file mode 100644 index 0000000..1548b59 --- /dev/null +++ b/content/talks-and-events/adesso-2023-12.de.md @@ -0,0 +1,45 @@ +--- +title: "ECO:DIGIT & Green Software Foundation SCI - adesso [x] Green Coding Solutions" +date: 2023-12-06 18:30:00 +publishDate: 2023-11-28 +draft: false +eventtype: "Meetup" +eventlink: "https://www.meetup.com/green-coding/events/297632091" +eventname: "ECO:DIGIT & Green Software Foundation SCI" +eventimg: "/img/talks-and-events/adesso_2023_12.webp" +eventimg600w: "/img/talks-and-events/adesso_2023_12_1200.webp" +--- + +Diesmal haben wir [Yelle Lieder](https://www.linkedin.com/in/lieder/) von [adesso](https://www.adesso.de/de/) eingeladen, um über ihr Projekt [ECO:DIGIT](https://gi.de/aktuelles/projekte/eco-digit) zu sprechen. + +In seinem Vortrag wird Yelle Lieder (adesso) das Forschungsprojekt ECO:DIGIT vorstellen. ECO:DIGIT startete im Sommer 23 und hat das Ziel, im Laufe von drei Jahren einer ganzheitlichen und wissenschaftlich fundierten Ökobilanzierung digitaler Systeme näher zu kommen. +In einem Kurzvortrag erfahren Sie mehr über die Vision des vom Bundesministerium für Wirtschaft und Klimaschutz geförderten Projekts. + + +Natürlich erfahren Sie auch, welche Vorteile das Projekt gegenüber bestehenden Lösungen verspricht und welche Möglichkeiten vor allem die Open-Source-Community hat, sich zu beteiligen. + + +Anschließend stellen wir Ihnen einige Open-Source-Werkzeuge vor, mit denen Sie einen automatisierten SCI-Score für Ihre Software erstellen können. +Der [SCI](https://sci-guide.greensoftware.foundation/) ist eine Metrik der Green Software Foundation, die die Kohlenstoff-Effizienz eines Softwareprodukts anzeigt. + + +Das [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool/), ein automatisiertes und von uns, Green Coding Solutions, entwickeltes Open-Source-Tool, ermöglicht eine solche automatisierte Messung. +Wir werden kurz über die Funktionalitäten des Tools sprechen und dann Perspektiven aufzeigen, wie Software anhand ihrer Kohlenstoff-Effizienz vergleichbar gemacht werden kann und wie Unternehmen und Entwickler dies nutzen können, um nachhaltige Software zu schreiben. + + +## Tagesordnung + + +- 18:30 - Öffnung der Türen und Begrüßung +- 19:00 - Vortrag von Yelle Lieder - adesso inkl. FRAGEN UND ANTWORTEN +- 19:30 - Vortrag von Arne Tarara - Green Coding Solutions inkl. FRAGEN UND ANTWORTEN +- 20:00 - Offene Fragerunde und Networking ... offenes Ende + + +Freigetränke und Shoutout an adesso + + +Vielen Dank an adesso für das Sponsoring des Raumes und der Getränke für diesen Abend! ❤️ + +*Englischsprachige Veranstaltung* + diff --git a/content/talks-and-events/adesso-2023-12.md b/content/talks-and-events/adesso-2023-12.md index e853feb..45cc9dc 100644 --- a/content/talks-and-events/adesso-2023-12.md +++ b/content/talks-and-events/adesso-2023-12.md @@ -1,5 +1,5 @@ --- -title: "ECO:DIGIT & Green Software Foundation SCI - adesso [x] Green Coding Berlin" +title: "ECO:DIGIT & Green Software Foundation SCI - adesso [x] Green Coding Solutions" date: 2023-12-06 18:30:00 publishDate: 2023-11-28 draft: false @@ -20,14 +20,14 @@ You will of course also learn what advantages the project promises compared to e Afterwards we will be speaking on some open source tooling to create an automated SCI score for your software. 
The [SCI](https://sci-guide.greensoftware.foundation/) is a metric by the Green Software Foundation that shows the carbon efficiency of a software product. -The [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool/), an automated and open source tool developend by us, Green Coding Berlin, allows for such an automated measurement. +The [Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool/), an automated and open source tool developend by us, Green Coding Solutions, allows for such an automated measurement. We will talk briefly about the tool functionalities and then perspectives how make software comparable according to their carbon efficiency and how companies and developers can leverage this to write sustainable software. ## Agenda - 18:30 - Doors Open & Greetings - 19:00 - Talk by Yelle Lieder - adesso incl. Q&A -- 19:30 - Talk by Arne Tarara - Green Coding Berlin incl. Q&A +- 19:30 - Talk by Arne Tarara - Green Coding Solutions incl. Q&A - 20:00 - Open Q&A and Networking ... open end Free Drinks and shout out to adesso diff --git a/content/talks-and-events/clean-it-potsdam-04-2023.md b/content/talks-and-events/clean-it-potsdam-04-2023.md index 7ceaeea..28a4924 100644 --- a/content/talks-and-events/clean-it-potsdam-04-2023.md +++ b/content/talks-and-events/clean-it-potsdam-04-2023.md @@ -17,7 +17,7 @@ We are invited to speak at the [openXchange events](https://open.hpi.de/courses/ We present our open-source projects Eco CI to make energy consumption in CI/CD pipelines visible and the Green Metrics Tool which helps developers compare arbitrary software regarding its energy cost. -Arne Tarara works for Green Coding Berlin, which is a Berlin-based software company focused on creating open-source and measurements in the domain of software energy consumption. He has been working as a software developer for the last 16 years mainly in the web domain with a strong background in analytics and linear modeling. +Arne Tarara works for Green Coding Solutions, which is a Berlin-based software company focused on creating open-source and measurements in the domain of software energy consumption. He has been working as a software developer for the last 16 years mainly in the web domain with a strong background in analytics and linear modeling. 
## Slide-Deck PDF diff --git a/content/talks-and-events/green-coding-infrastructure-gfk-2023-05.md b/content/talks-and-events/green-coding-infrastructure-gfk-2023-05.md index e40c976..9edf503 100644 --- a/content/talks-and-events/green-coding-infrastructure-gfk-2023-05.md +++ b/content/talks-and-events/green-coding-infrastructure-gfk-2023-05.md @@ -1,11 +1,11 @@ --- -title: "Green Infrastructure Meetup GfK [x] Green Coding Berlin" +title: "Green Infrastructure Meetup GfK [x] Green Coding Solutions" date: 2023-05-31 18:30:00 publishDate: 2023-05-12 draft: false eventtype: "Meetup" eventlink: "https://www.meetup.com/green-coding/events/293492344/" -eventname: "Green Infrastructure Meetup GfK [x] Green Coding Berlin" +eventname: "Green Infrastructure Meetup GfK [x] Green Coding Solutions" eventimg: "/img/talks-and-events/green-coding-infrastructure-gfk-2023-05.webp" eventimg600w: '/img/talks-and-events/green-coding-infrastructure-gfk-2023-05-600w.webp' --- @@ -27,7 +27,7 @@ The event is supposed to bring together like-minded professionals, developers, a Bernhard Günther (Principal Site Reliability Engineer, GfK) will give a short presentation about **Green IT and sustainability in the Cloud** to feed an emerging discussion. As the cloud gives you a lot of freedom it also forces you to take the responsibility to plan your cloud infrastructure sustainable yourself. -Arne Tarara will share insights on **Understanding Software Carbon emissions**. Software uses no physical resources directly, but the underlying infrastructure. In order to help developers contribute to the worlds carbon neutral target it is key to understand where carbon emissions from software come from and how they can be identified. Arne, a software engineer from Open-Source company Green Coding Berlin, will give an overview over current models and frameworks for classifying and attributing software carbon share and current limitations with these approaches. He will present some Open-Source tools that make energy cost and carbon emissions visible for a developer to drive energy consumption and carbon emissions down. +Arne Tarara will share insights on **Understanding Software Carbon emissions**. Software uses no physical resources directly, but the underlying infrastructure. In order to help developers contribute to the worlds carbon neutral target it is key to understand where carbon emissions from software come from and how they can be identified. Arne, a software engineer from Open-Source company Green Coding Solutions, will give an overview over current models and frameworks for classifying and attributing software carbon share and current limitations with these approaches. He will present some Open-Source tools that make energy cost and carbon emissions visible for a developer to drive energy consumption and carbon emissions down. In addition to the speakers, there will be enough time for networking and discussion among attendees. We encourage everyone to bring their own ideas and questions to share with the community. diff --git a/content/talks-and-events/green-software-development-04-2023.md b/content/talks-and-events/green-software-development-04-2023.md index 77c19ad..7556eef 100644 --- a/content/talks-and-events/green-software-development-04-2023.md +++ b/content/talks-and-events/green-software-development-04-2023.md @@ -32,7 +32,7 @@ We're looking forward to see you there! - 6:30 PM | For in-person: Doors open. 
Grab some snacks & drinks - 7:00 PM | Start of Hybrid Meetup - 7:00 PM | Welcome & Intro -- 7:15 PM | "Tools and Best-Practices for Sustainable Software - An overview", Arne Tarara, Lead Engineer at Green Coding Berlin +- 7:15 PM | "Tools and Best-Practices for Sustainable Software - An overview", Arne Tarara, Lead Engineer at Green Coding Solutions - Afterwards: More snacks and drinks as well as plenty of time for networking with all attendees #### Talk: Tools and Best-Practices for Sustainable Software - An overview diff --git a/content/talks-and-events/greening-digital-with-tgwf.md b/content/talks-and-events/greening-digital-with-tgwf.md index a8d0c55..edbc30b 100644 --- a/content/talks-and-events/greening-digital-with-tgwf.md +++ b/content/talks-and-events/greening-digital-with-tgwf.md @@ -1,11 +1,11 @@ --- -title: "Greening Digital - The Green Web Foundation [x] Green Coding Berlin" +title: "Greening Digital - The Green Web Foundation [x] Green Coding Solutions" date: 2022-08-03 18:30:00 publishDate: 2022-07-22 draft: false eventtype: "Meetup" eventlink: "https://www.meetup.com/green-coding/events/287352713/" -eventname: "Greening Digital - The Green Web Foundation [x] Green Coding Berlin" +eventname: "Greening Digital - The Green Web Foundation [x] Green Coding Solutions" eventimg: "/img/talks-and-events/greening-digital-with-tgwf.webp" eventimg600w: "/img/talks-and-events/greening-digital-with-tgwf-600w.webp" @@ -17,8 +17,8 @@ We have teamed with **Chris Adams** from **The Green Web Foundation** to co-host Chris will give a share what you need to know about how digital infrastructure is powered, and how to make the greenest choices for powering your services. He will also talk about the lastest works of The Green Web Foundation and their mission to create a fossil free internet. -## Part#2 Green Coding Berlin -In the second part of the Meetup we as Green Coding Berlin will be presenting our recent works on our Open Source Toolchain (AGPLv3) to measure the carbon impact of your Software: [The Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool) +## Part#2 Green Coding Solutions +In the second part of the Meetup we as Green Coding Solutions will be presenting our recent works on our Open Source Toolchain (AGPLv3) to measure the carbon impact of your Software: [The Green Metrics Tool](https://github.com/green-coding-berlin/green-metrics-tool) We will also walk you through the approach to measure your infrastructure and software with our tool and how you can use it in your own project. ## Casual Pizzas & Community diff --git a/content/talks-and-events/gsd-meetup-karlsruhe-2023-01.md b/content/talks-and-events/gsd-meetup-karlsruhe-2023-01.md index 9de8933..d99cb0f 100644 --- a/content/talks-and-events/gsd-meetup-karlsruhe-2023-01.md +++ b/content/talks-and-events/gsd-meetup-karlsruhe-2023-01.md @@ -27,7 +27,7 @@ In dem Vortrag stellt Arne einige Tools vor, welche es ermöglichen die Energie Neben dem Betrieb der Software verbraucht die Entwicklung und insbesondere die CI/CD Pipeline Energie. Wir sehen, wie wir diese Emissionen aus der Pipeline messen und anzeigen. -Arne Tarara ist passionierter Softwareentwickler und seit 16 Jahren in der Branche. Aktuell verfolgt er mit seinem Unternehmen, der Green Coding Berlin GmbH das Ziel, den Energieverbrauch von Software für Entwickler und Anwender einfach sichtbar zu machen. +Arne Tarara ist passionierter Softwareentwickler und seit 16 Jahren in der Branche. 
Aktuell verfolgt er mit seinem Unternehmen, der Green Coding Solutions GmbH, das Ziel, den Energieverbrauch von Software für Entwickler und Anwender einfach sichtbar zu machen. ------------------------------------------- diff --git a/content/talks-and-events/kickoff-green-software-development-stuttgart-2023-10.md b/content/talks-and-events/kickoff-green-software-development-stuttgart-2023-10.md index a9b7995..fdc46ed 100644 --- a/content/talks-and-events/kickoff-green-software-development-stuttgart-2023-10.md +++ b/content/talks-and-events/kickoff-green-software-development-stuttgart-2023-10.md @@ -1,5 +1,5 @@ --- -title: "Kickoff Green Software Development Stuttgart [x] Green Coding Berlin" +title: "Kickoff Green Software Development Stuttgart [x] Green Coding Solutions" date: 2023-10-05 18:00:00 publishDate: 2023-10-02 draft: false @@ -22,7 +22,7 @@ Agenda: - Begrüßung - Kurzvorstellung: Wer sind die Initiatoren? Was ist die Idee hinter der Meetup-Reihe? -Präsentation Arne Tarara (Green Coding Berlin): Sustainable Software: How can we quantify and measure "green-ness" of code? +Präsentation Arne Tarara (Green Coding Solutions): Sustainable Software: How can we quantify and measure "green-ness" of code? - Diskussion: Vorstellungen, Erwartungen und mögliche Beiträge der Teilnehmer. Welche Themen fändet ihr spannend? (Das kann auch als "World Cafe" stattfinden. Wir entscheiden das spontan.) diff --git a/content/talks-and-events/pydata-meetup-nov-2022.md b/content/talks-and-events/pydata-meetup-nov-2022.md index 1c642ff..698b079 100644 --- a/content/talks-and-events/pydata-meetup-nov-2022.md +++ b/content/talks-and-events/pydata-meetup-nov-2022.md @@ -1,11 +1,11 @@ --- -title: "Cloud energy consumption - PyData Berlin [x] Green Coding Berlin" +title: "Cloud energy consumption - PyData Berlin [x] Green Coding Solutions" date: 2022-11-16 19:00:00 publishDate: 2022-11-09 draft: false eventtype: "Meetup" eventlink: "https://www.meetup.com/green-coding/events/289642025/" -eventname: "Cloud energy consumption - PyData Berlin [x] Green Coding Berlin" +eventname: "Cloud energy consumption - PyData Berlin [x] Green Coding Solutions" eventimg: "/img/talks-and-events/pydata-meetup-nov-2022.webp" eventimg600w: "/img/talks-and-events/pydata-meetup-nov-2022-600w.webp" @@ -30,7 +30,7 @@ Energy cost estimation for cloud workloads is an emerging topic that surfaces es ## Slide-Deck PDF -Here are the slides to our talk, where we present the works on and performance of the model +Here are the slides from our talk at the PyData Nov'22 Meetup in Berlin, where we present our work on the model and its performance. [Download Slides](/slides/PyData-Talk.pdf) \ No newline at end of file diff --git a/content/talks-and-events/sdia-event-0922.md b/content/talks-and-events/sdia-event-0922.md index 928aae9..41e2fbb 100644 --- a/content/talks-and-events/sdia-event-0922.md +++ b/content/talks-and-events/sdia-event-0922.md @@ -13,11 +13,11 @@ We have been invited to speak and give a workshop on the topic of measuring and Please have a look at the official event page in the link at the bottom and RSVP. -This is the appetizer text: +This is the appetizer text: > Arne will be presenting tooling for developers to measure energy-use & emissions. > At our community event in Berlin, Arne will be presenting the open-source tooling that his team is building for developers to understand the energy-use of their applications.
It is aimed at measuring the energy use during the execution of standard-usage scenarios (as required by the German Blue Angel certification), but can be used to get a good understanding of any running application. -> It’s based on the RAPL interface and is under heavy development. Green Coding Berlin is also actively contributing to the Life Cycle Assessment methodology that is under development at the SDIA for digital resources (https://sdia.io/def-summary) +> It’s based on the RAPL interface and is under heavy development. Green Coding Solutions is also actively contributing to the Life Cycle Assessment methodology that is under development at the SDIA for digital resources (https://sdia.io/def-summary) #### Update: Recording diff --git a/content/talks-and-events/sustainable-software-kde.md b/content/talks-and-events/sustainable-software-kde.md index 3bfaeeb..4066574 100644 --- a/content/talks-and-events/sustainable-software-kde.md +++ b/content/talks-and-events/sustainable-software-kde.md @@ -1,11 +1,11 @@ --- -title: "Sustainable Software - KDE Eco [x] Green Coding Berlin" +title: "Sustainable Software - KDE Eco [x] Green Coding Solutions" date: 2022-09-14 18:30:00 publishDate: 2022-08-17 draft: false eventtype: "Meetup" eventlink: "https://www.meetup.com/green-coding/events/287887811/" -eventname: "Sustainable Software - KDE Eco [x] Green Coding Berlin" +eventname: "Sustainable Software - KDE Eco [x] Green Coding Solutions" eventimg: "/img/talks-and-events/meetup-kde.webp" eventimg600w: "/img/talks-and-events/meetup-kde-600w.webp" @@ -20,7 +20,7 @@ He will provide an overview of the Free and Open Source (FOSS) energy measuremen Joseph will additionally discuss how the FOSS values of user autonomy and transparency enable users to directly influence the factors determining software sustainability. ## Part #2 -In the second part of the Meetup we as Green Coding Berlin will be presenting some insights on how to identify energy saving potentials in Software and how either architecture or code level optimizations can help you achieve them. +In the second part of the Meetup we as Green Coding Solutions will be presenting some insights on how to identify energy-saving potentials in software and how either architecture- or code-level optimizations can help you realize them. ## Casual Pizzas & Community Afterwards, as usual, we will hang out and chat. There is also time and space to present you Green Software project in a casual round afterwards. diff --git a/i18n/de.yaml b/i18n/de.yaml new file mode 100644 index 0000000..a64cca1 --- /dev/null +++ b/i18n/de.yaml @@ -0,0 +1,42 @@ +- id: home_what_we_do + translation: "Was wir machen" +- id: home_teaser + translation: "Wir unterstützen Unternehmen, Behörden und Open-Source-Projekte dabei, die CO2-Emissionen ihrer digitalen Dienste zu analysieren und zu reduzieren, um umweltfreundlichere digitale Lösungen zu fördern."
+- id: home_cta + translation: "👉 Mehr Details" +- id: home_about_details + translation: "Mehr erfahren über die CO2-Emissionen von Software" +- id: home_what_we_do + translation: "Was wir machen" +- id: home_projects + translation: "Projekte" +- id: home_projects_teaser + translation: "Wir entwickeln Werkzeuge, die es Entwicklern und Anwendern ermöglichen, Optimierungen vorzunehmen, um den Energieverbrauch und CO2-Emissionen zu senken" +- id: home_talks_teaser + translation: "Eine Auswahl unserer letzten Events und Veranstaltungen" +- id: home_talks_full_list + translation: "Alle Events und Veranstaltungen" + + + +- id: about + translation: Über +- id: services + translation: Dienstleistungen +- id: talks + translation: Vorträge & Veranstaltungen +- id: blog + translation: Blog +- id: jobs + translation: Jobs +- id: formulas + translation: CO2-Formeln +- id: studies + translation: Fallstudien +- id: team + translation: Team +- id: list_orgs + translation: "Eine ausgewählte Liste von Organisationen, mit denen wir zusammengearbeitet haben oder bei denen wir Vorträge gehalten haben." + +- id: references + translation: "Referenzen" diff --git a/i18n/en.yaml b/i18n/en.yaml new file mode 100644 index 0000000..dbd53ee --- /dev/null +++ b/i18n/en.yaml @@ -0,0 +1,39 @@ +- id: home_what_we_do + translation: "What we do" +- id: home_teaser + translation: "We help companies, government bodies and open source communities to understand and optimize the carbon emissions of their digital services" +- id: home_cta + translation: "👉 See our services" +- id: home_about_details + translation: "Read some details about the carbon emissions of software" +- id: home_projects + translation: "Projects" +- id: home_projects_teaser + translation: "We develop tools to empower developers and users to reduce the energy use and carbon emissions of software" +- id: home_talks_teaser + translation: "Browse through our latest events and watch recordings (if available :) )" +- id: home_talks_full_list + translation: "For a full list" + + +- id: about + translation: About +- id: services + translation: Services +- id: talks + translation: Talks & Events +- id: blog + translation: Blog +- id: jobs + translation: Jobs +- id: formulas + translation: CO2 Formulas +- id: studies + translation: Case Studies +- id: team + translation: Team +- id: list_orgs + translation: "A curated list of organizations we have been working with or have been giving talks at." +- id: references + translation: "References" diff --git a/layouts/404.html b/layouts/404.html index 1b8a97f..7c1a037 100644 --- a/layouts/404.html +++ b/layouts/404.html @@ -1,7 +1,3 @@ -{{ define "site-navigation" }} -{{/* We can override any block in the baseof file be defining it in the template */}} -{{ partial "site-navigation.html" . }} -{{ end }} {{ define "main" }}
    diff --git a/layouts/_default/baseof.html b/layouts/_default/baseof.html index 9c3c826..85bc168 100644 --- a/layouts/_default/baseof.html +++ b/layouts/_default/baseof.html @@ -1,9 +1,5 @@ - - - - - + @@ -47,6 +43,13 @@ {{ end }} + + {{ if .IsTranslated }} + {{ range .Translations }} + + {{ end }} + {{ end }} + {{ partial "site-style.html" . }} {{ block "favicon" . }} @@ -60,7 +63,7 @@
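Note on the `layouts/_default/baseof.html` hunk above: the markup inside the added `{{ if .IsTranslated }}` block (and the removed/added `<head>` tags) was stripped when this diff was rendered, so only the template braces survive. A minimal sketch, assuming the block emits the usual hreflang alternate links for a Hugo multi-language site (the actual tags in the committed template may differ):

```go-html-template
{{/* Sketch only: the real tags were stripped from the diff above.
     Typical hreflang alternates for translated pages in baseof.html. */}}
{{ if .IsTranslated }}
  {{ range .Translations }}
    <link rel="alternate" hreflang="{{ .Language.Lang }}" href="{{ .Permalink }}" title="{{ .Language.LanguageName }}">
  {{ end }}
{{ end }}
```

The `- id:`/`translation:` pairs added in `i18n/de.yaml` and `i18n/en.yaml` would then be looked up in the layouts with Hugo's `i18n` function, e.g. `{{ i18n "home_what_we_do" }}` or `{{ i18n "home_cta" }}`, so the same template serves both the English and the German pages.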