#include <bits/stdc++.h>
using namespace std;
using ll = long long;
// Fenwick (binary indexed) tree over 1-based indices with long long sums.
struct Fenwick {
    int n;            // highest valid index
    vector<ll> bit;   // internal tree, bit[0] unused

    Fenwick(int _n = 0) { init(_n); }

    // Resize to support indices 1..._n and clear all values to zero.
    void init(int _n) {
        n = _n;
        bit.assign(n + 1, 0);
    }

    // Point update: a[idx] += val.
    void add(int idx, ll val) {
        while (idx <= n) {
            bit[idx] += val;
            idx += idx & -idx;
        }
    }

    // Prefix query: a[1] + a[2] + ... + a[idx].
    ll sumPrefix(int idx) {
        ll acc = 0;
        while (idx > 0) {
            acc += bit[idx];
            idx -= idx & -idx;
        }
        return acc;
    }

    // Range query: a[l] + ... + a[r]; empty ranges (r < l) yield 0.
    ll rangeSum(int l, int r) {
        return (r < l) ? 0 : sumPrefix(r) - sumPrefix(l - 1);
    }
};
// For each query (l, r) the program prints
//   TOTAL(l, r) - EQ(l, r)
// where TOTAL(l, r) = sum over all pairs l <= i < j <= r of (r + 1 - j),
// and EQ(l, r) restricts that sum to pairs with a[i] == a[j].
// Equal-value pairs are split into "light" values (<= B occurrences,
// handled offline with pair events + Fenwick trees, O(n*B) events total)
// and "heavy" values (> B occurrences, at most n/B of them, handled per
// query via per-value prefix sums).
int main(){
    ios::sync_with_stdio(false);
    cin.tie(nullptr);

    int n, q;
    if(!(cin >> n >> q)) return 0;
    vector<int> a(n + 1);
    for(int i = 1; i <= n; i++) cin >> a[i];

    // --- Coordinate-compress values into [0, V) ---
    vector<int> vals(a.begin() + 1, a.end());
    sort(vals.begin(), vals.end());
    vals.erase(unique(vals.begin(), vals.end()), vals.end());
    for(int i = 1; i <= n; i++)
        a[i] = int(lower_bound(vals.begin(), vals.end(), a[i]) - vals.begin());
    int V = (int)vals.size();

    // pos[v] = increasing list of positions holding compressed value v.
    vector<vector<int>> pos(V);
    for(int i = 1; i <= n; i++) pos[a[i]].push_back(i);

    // Heavy/light split threshold.
    int B = max(320, (int)sqrt((double)n)); // tweakable
    vector<int> isHeavy(V, 0);
    vector<int> heavyList;
    for(int v = 0; v < V; ++v){
        if((int)pos[v].size() > B){
            isHeavy[v] = 1;
            heavyList.push_back(v);
        }
    }

    // Prefix sums per HEAVY value only (light values never read them):
    //   prefP0[v][t] = sum_{u < t} pos[v][u]
    //   prefJP[v][t] = sum_{u < t} u * pos[v][u]   (u is the 0-based rank)
    vector<vector<ll>> prefP0(V), prefJP(V);
    for(int v : heavyList){
        int m = (int)pos[v].size();
        prefP0[v].assign(m + 1, 0);
        prefJP[v].assign(m + 1, 0);
        for(int t = 0; t < m; ++t){
            prefP0[v][t + 1] = prefP0[v][t] + pos[v][t];
            prefJP[v][t + 1] = prefJP[v][t] + 1LL * t * pos[v][t];
        }
    }

    // Read queries once, keeping them both in input order (for output) and
    // grouped by right endpoint (for the offline sweep).
    vector<pair<int,int>> queries(q);            // (l, r) in input order
    vector<vector<pair<int,int>>> qryAtR(n + 1); // r -> list of (l, query idx)
    vector<ll> ans(q, 0);
    for(int i = 0; i < q; i++){
        int l, r; cin >> l >> r;
        queries[i] = {l, r};
        qryAtR[r].push_back({l, i});
    }

    // For every same-value LIGHT pair (i < j), register an event at j so the
    // sweep over r activates exactly the pairs with j <= r.
    vector<vector<int>> events(n + 1);
    for(int v = 0; v < V; ++v){
        if(isHeavy[v]) continue;
        auto &P = pos[v];
        int m = (int)P.size();
        for(int i = 0; i < m; i++)
            for(int j = i + 1; j < m; j++)
                events[P[j]].push_back(P[i]);
    }

    // bitCnt counts active pairs keyed by left index i; bitSum sums their j.
    Fenwick bitCnt(n), bitSum(n);
    for(int r = 1; r <= n; ++r){
        for(int iPos : events[r]){
            bitCnt.add(iPos, 1);
            bitSum.add(iPos, r);
        }
        // EQ_light(l, r) = sum over active pairs with i >= l of (r + 1 - j)
        //                = (r + 1) * count - sum(j).
        for(const auto &[l, idx] : qryAtR[r]){
            ll cnt  = bitCnt.rangeSum(l, r);
            ll sumJ = bitSum.rangeSum(l, r);
            ans[idx] = (r + 1LL) * cnt - sumJ;
        }
    }

    // TOTAL(l, r) = sum_{l <= i < j <= r} (r + 1 - j); with m = r - l this is
    // (m+1) * m(m+1)/2 - m(m+1)(2m+1)/6 (both products divide exactly).
    auto computeTotal = [&](int l, int r) -> ll {
        if(l >= r) return 0LL;
        ll m = r - l;
        ll term1 = (m + 1) * (m * (m + 1) / 2);
        ll term2 = m * (m + 1) * (2 * m + 1) / 6;
        return term1 - term2;
    };

    // Finalize each query: add the heavy-value equal-pair weight, then print
    // TOTAL - EQ (i.e. the weight over pairs with differing values).
    for(int idx = 0; idx < q; ++idx){
        int l = queries[idx].first;
        int r = queries[idx].second;
        ll eq = ans[idx]; // light contribution from the sweep
        for(int v : heavyList){
            auto &P = pos[v];
            // Occurrences of v inside [l, r] are P[L0..R0].
            int L0 = int(lower_bound(P.begin(), P.end(), l) - P.begin());
            int R0 = int(upper_bound(P.begin(), P.end(), r) - P.begin()) - 1;
            int cnt = R0 - L0 + 1;
            if(cnt >= 2){ // need at least one pair (also implies L0 <= R0)
                ll sumk  = 1LL * cnt * (cnt - 1) / 2;             // number of pairs
                // Spos = sum_{j=L0+1..R0} (j - L0) * P[j], via the prefix arrays.
                ll sumP  = prefP0[v][R0 + 1] - prefP0[v][L0 + 1]; // sum P[j]
                ll sumJP = prefJP[v][R0 + 1] - prefJP[v][L0 + 1]; // sum j * P[j]
                ll Spos  = sumJP - 1LL * L0 * sumP;
                eq += (r + 1LL) * sumk - Spos;
            }
        }
        cout << computeTotal(l, r) - eq << '\n';
    }
    return 0;
}