So...
I installed Code::Blocks 20.03-MinGW (GCC 8.1.0).
I modified the code to make the string and vector versions practically equivalent from an algorithmic point of view:
#include <iostream>
#include <cstdint>
#include <chrono>
#include <string>
#include <vector>

using namespace std;
using namespace std::chrono;

// Digits are stored least-significant first, one ASCII digit per char.
string addition1(const string &s1, const string &s2)
{
    string s3;
    unsigned int n = 0;
    for(unsigned int i = 0; i < s1.size() || i < s2.size(); ++i)
    {
        n /= 10; // carry from the previous digit
        if(i < s1.size())
        {
            n += s1[i] - '0';
        }
        if(i < s2.size())
        {
            n += s2[i] - '0';
        }
        s3.push_back(n % 10 + '0');
    }
    if(n >= 10)
    {
        s3.push_back('1'); // final carry
    }
    return s3;
}

string multiplication1(const string &s1, const string &s2)
{
    string s3;
    string temp;
    for(unsigned int i = 0; i < s1.size(); ++i)
    {
        temp.resize(0);
        unsigned int n = 0;
        for(unsigned int j = 0; j < i; ++j)
        {
            temp.push_back('0'); // shift the partial product by i digits
        }
        for(unsigned int j = 0; j < s2.size(); ++j)
        {
            n = (s1[i] - '0') * (s2[j] - '0') + n / 10;
            temp.push_back(n % 10 + '0');
        }
        if(n >= 10)
        {
            temp.push_back(n / 10 + '0'); // final carry
        }
        s3 = addition1(s3, temp);
    }
    return s3;
}

// Same algorithm, but one decimal digit per uint8_t (no ASCII offset).
vector<uint8_t> addition2(const vector<uint8_t> &v1, const vector<uint8_t> &v2)
{
    vector<uint8_t> v3;
    uint8_t n = 0;
    for(unsigned int i = 0; i < v1.size() || i < v2.size(); ++i)
    {
        n /= 10; // carry from the previous digit
        if(i < v1.size())
        {
            n += v1[i];
        }
        if(i < v2.size())
        {
            n += v2[i];
        }
        v3.push_back(n % 10);
    }
    if(n >= 10)
    {
        v3.push_back(1); // final carry
    }
    return v3;
}

vector<uint8_t> multiplication2(const vector<uint8_t> &v1, const vector<uint8_t> &v2)
{
    vector<uint8_t> v3;
    vector<uint8_t> temp;
    for(unsigned int i = 0; i < v1.size(); ++i)
    {
        temp.resize(0);
        uint8_t n = 0;
        for(unsigned int j = 0; j < i; ++j)
        {
            temp.push_back(0); // shift the partial product by i digits
        }
        for(unsigned int j = 0; j < v2.size(); ++j)
        {
            n = v1[i] * v2[j] + n / 10;
            temp.push_back(n % 10);
        }
        if(n >= 10)
        {
            temp.push_back(n / 10); // final carry
        }
        v3 = addition2(v3, temp);
    }
    return v3;
}

int main()
{
    cout << "computing 12345^1000 (string version): ";
    string a = "54321"; // 12345, least significant digit first
    string b = "1";
    auto start1 = high_resolution_clock::now();
    for(unsigned int i = 0; i < 1000; ++i)
    {
        b = multiplication1(a, b);
    }
    auto stop1 = high_resolution_clock::now();
    auto duration1 = duration_cast<milliseconds>(stop1 - start1);
    cout << duration1.count() << " ms" << endl;
    //-----------------------------------------------------------
    cout << "computing 12345^1000 (vector version): ";
    vector<uint8_t> c = {5, 4, 3, 2, 1}; // 12345, least significant digit first
    vector<uint8_t> d = {1};
    auto start2 = high_resolution_clock::now();
    for(unsigned int i = 0; i < 1000; ++i)
    {
        d = multiplication2(c, d);
    }
    auto stop2 = high_resolution_clock::now();
    auto duration2 = duration_cast<milliseconds>(stop2 - start2);
    cout << duration2.count() << " ms" << endl;
}
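For good measure, here is a minimal check one could append at the end of main, after the two timing loops, to confirm that the two versions really produce the same digits (the same flag is just illustrative; both b and d store the least significant digit first):

bool same = (b.size() == d.size());
for(unsigned int i = 0; same && i < b.size(); ++i)
{
    // convert the ASCII digit back to its numeric value before comparing
    same = (b[i] - '0') == d[i];
}
cout << (same ? "results match" : "results differ") << endl;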
That said, things get even "stranger":
- running the code above built with -std=c++14 I get:
computing 12345^1000 (string version): 366 ms
computing 12345^1000 (vector version): 1090 ms
Process returned 0 (0x0) execution time : 1.474 s
Press any key to continue.
- built with -std=c++17 I get:
computing 12345^1000 (string version): 1409 ms
computing 12345^1000 (vector version): 1370 ms
Process returned 0 (0x0) execution time : 2.796 s
Press any key to continue.
I can't make sense of this anymore!
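P.S. To rule out Code::Blocks silently not forwarding the standard flag to GCC, a one-line check that can go at the top of main is printing __cplusplus (GCC reports 201402L under -std=c++14 and 201703L under -std=c++17):

cout << "__cplusplus = " << __cplusplus << endl;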