 from qiskit.quantum_info import Statevector
 from qiskit.transpiler import CouplingMap
 
-from pyqrack import QrackSimulator
+from pyqrackising import get_tfim_hamming_distribution
 
 
 # Factor the qubit width for torus dimensions that are as close as possible to square
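
The factoring helper itself is outside this hunk, so the name and shape below are assumptions; a minimal sketch of the idea is to take the divisor of n nearest its square root:

import math

def factor_square_ish(n):
    # Largest divisor of n that is <= sqrt(n); pairs with n // rows to give
    # torus dimensions as close to square as possible.
    rows = int(math.isqrt(n))
    while n % rows:
        rows -= 1
    return rows, n // rows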
@@ -83,7 +83,7 @@ def add_rzz_pairs(pairs):
 
 
 # Calculate various statistics based on comparison between ideal (Trotterized) and approximate (continuum) measurement distributions.
-def calc_stats(n_rows, n_cols, ideal_probs, counts, bias, model, shots, depth):
+def calc_stats(n_rows, n_cols, ideal_probs, bias, depth):
     # For QV, we compare probabilities of (ideal) "heavy outputs."
     # If the probability is above 2/3, the protocol certifies/passes the qubit width.
     n = n_rows * n_cols
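
For orientation, a hypothetical call under the slimmed-down signature (the counts, model, and shots parameters are gone because the function now scores the closed-form model alone), reading back the keys assembled at the end of the function:

result = calc_stats(n_rows, n_cols, ideal_probs, bias, d)
print(result["l2_difference"], result["z_fidelity"], result["hog_prob"], result["xeb"])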
@@ -99,22 +99,16 @@ def calc_stats(n_rows, n_cols, ideal_probs, counts, bias, model, shots, depth):
     # hamming_bias = [0.0] * (n + 1)
     for i in range(n_pow):
         ideal = ideal_probs[i]
-        count = counts[i] if i in counts else 0
-        exp = count / shots
 
         # How many bits are 1, in the basis state?
         hamming_weight = hamming_distance(i, 0, n)
-        if model:
-            # How closely grouped are "like" bits to "like"?
-            expected_closeness = expected_closeness_weight(n_rows, n_cols, hamming_weight)
-            # When we add all "closeness" possibilities for the particular Hamming weight, we should maintain the (n+1) mean probability dimensions.
-            normed_closeness = (1 + closeness_like_bits(i, n_rows, n_cols)) / (1 + expected_closeness)
-            # If we're also using conventional simulation, use a normalized weighted average that favors the (n+1)-dimensional model at later times.
-            # The (n+1)-dimensional marginal probability is the product of a function of Hamming weight and "closeness," split among all basis states with that specific Hamming weight.
-            count = (1 - model) * exp + model * normed_closeness * bias[hamming_weight] / math.comb(n, hamming_weight)
-        else:
-            count = exp
-        exp = count / shots
+        # How closely grouped are "like" bits to "like"?
+        expected_closeness = expected_closeness_weight(n_rows, n_cols, hamming_weight)
+        # When we add all "closeness" possibilities for the particular Hamming weight, we should maintain the (n+1) mean probability dimensions.
+        normed_closeness = (1 + closeness_like_bits(i, n_rows, n_cols)) / (1 + expected_closeness)
+        # The marginal model is now used directly at every depth, with no weighted average against conventional-simulation samples.
+        # The (n+1)-dimensional marginal probability is the product of a function of Hamming weight and "closeness," split among all basis states with that specific Hamming weight.
+        count = normed_closeness * bias[hamming_weight] / math.comb(n, hamming_weight)
 
         # You can make sure this still adds up to 1.0, to show the distribution is normalized:
         # total += count
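
The normalization that the commented-out total check refers to holds because each Hamming-weight marginal bias[w] is split evenly over the comb(n, w) basis states of that weight, with the closeness factor normalized to average 1; a toy check, treating normed_closeness as 1 and using a made-up flat bias:

import math
n = 4
bias = [1 / (n + 1)] * (n + 1)  # hypothetical flat Hamming-weight marginals
total = sum(bias[bin(i).count("1")] / math.comb(n, bin(i).count("1")) for i in range(1 << n))
assert abs(total - 1.0) < 1e-12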
@@ -124,17 +118,16 @@ def calc_stats(n_rows, n_cols, ideal_probs, counts, bias, model, shots, depth):
             sum_hog_counts += count
 
         # L2 distance
-        diff_sqr += (ideal - exp) ** 2
-        z_fidelity += exp if ideal > exp else ideal
-        # hamming_bias[hamming_weight] += (ideal - exp) ** 2
+        diff_sqr += (ideal - count) ** 2
+        z_fidelity += count if ideal > count else ideal
+        # hamming_bias[hamming_weight] += (ideal - count) ** 2
 
         # XEB / EPLG
         ideal_centered = ideal - u_u
         denom += ideal_centered * ideal_centered
-        numer += ideal_centered * (exp - u_u)
+        numer += ideal_centered * (count - u_u)
 
     l2_difference = diff_sqr ** (1 / 2)
-    hog_prob = sum_hog_counts / shots
     xeb = numer / denom
 
     # This should be ~1.0, if we're properly normalized.
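
The XEB figure computed here is the slope of a least-squares fit of the approximate probabilities against the ideal ones, centered on the uniform baseline u_u; a self-contained sketch with made-up distributions:

import numpy as np
ideal = np.array([0.4, 0.3, 0.2, 0.1])     # hypothetical ideal probabilities
approx = np.array([0.35, 0.3, 0.25, 0.1])  # hypothetical model probabilities
u_u = 1 / len(ideal)
ideal_c = ideal - u_u
xeb = float(np.dot(ideal_c, approx - u_u) / np.dot(ideal_c, ideal_c))  # 1.0 = perfect linear correlation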
@@ -146,7 +139,7 @@ def calc_stats(n_rows, n_cols, ideal_probs, counts, bias, model, shots, depth):
         "depth": depth,
         "l2_difference": float(l2_difference),
         "z_fidelity": float(z_fidelity),
-        "hog_prob": hog_prob,
+        "hog_prob": sum_hog_counts,
         "xeb": xeb,
     }
 
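
Because the function now sums probabilities rather than shot counts, sum_hog_counts is already normalized, which is why the division by shots disappears and the sum is returned as hog_prob directly; the certification from the comment at the top of the function would then read, as a sketch:

passes_qv = result["hog_prob"] > 2 / 3  # heavy-output threshold for the QV-style protocol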
@@ -254,7 +247,7 @@ def main():
     if len(sys.argv) > 4:
         t1 = float(sys.argv[4])
     else:
-        t1 = dt
+        t1 = dt * dt
     if len(sys.argv) > 5:
         shots = int(sys.argv[5])
     else:
@@ -280,25 +273,8 @@ def main():
     # The Aer circuit also starts with this initialization
     qc_aer = qc.copy()
 
-    # Compile a single Trotter step for QrackSimulator.
-    step = QuantumCircuit(n_qubits)
-    trotter_step(step, qubits, (n_rows, n_cols), J, h, dt)
-    step = transpile(
-        step,
-        optimization_level=3,
-        basis_gates=QrackSimulator.get_qiskit_basis_gates(),
-    )
-
-    # If we're using conventional simulation in the approximation model, collect samples over the depth series.
-    experiment_probs = [{}] * (depth + 1)
-    experiment = QrackSimulator(n_qubits)
-    experiment.run_qiskit_circuit(qc)
-    counts = dict(Counter(experiment.measure_shots(qubits, shots)))
-
-    for key, value in counts.items():
-        experiment_probs[0][key] = (
-            experiment_probs[0].get(key, 0) + value / shots
-        )
+    # Take the closed-form (n+1)-dimensional Hamming-weight distribution at t=0 instead of sampling a conventional simulation.
+    bias_0 = get_tfim_hamming_distribution(J=J, h=h, z=4, theta=theta, t=0, n_qubits=n_qubits)
 
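As far as this diff shows, the new dependency is being asked for a closed-form Hamming-weight distribution of the transverse-field Ising model at time t, one probability per weight 0..n_qubits; z=4 is presumably the coordination number of the square lattice. A hedged probe of that assumed shape:

bias_t = get_tfim_hamming_distribution(J=J, h=h, z=4, theta=theta, t=0, n_qubits=n_qubits)
assert len(bias_t) == n_qubits + 1    # one marginal per Hamming weight, per its use in calc_stats
assert abs(sum(bias_t) - 1.0) < 1e-9  # assumed normalized, per the "adds up to 1.0" check above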
     control = AerSimulator(method="statevector")
     qc_aer = transpile(
@@ -314,33 +290,21 @@ def main():
         n_rows,
         n_cols,
         control_probs,
-        experiment_probs[0],
-        [],
-        0,
-        shots,
-        0,
+        bias_0,
+        0,
     )
 
     # Add up the square residuals:
     r_squared = result["l2_difference"] ** 2
 
     n_bias = n_qubits + 1
-    bias_0 = np.zeros(n_bias, dtype=np.float64)
+    mid = n_qubits / 2
     magnetization_0, sqr_magnetization_0 = 0, 0
-    for key, value in experiment_probs[0].items():
-        m = 0
-        h = 0
-        for _ in range(n_qubits):
-            if key & 1:
-                m -= 1
-                h += 1
-            else:
-                m += 1
-            key >>= 1
-        m /= n_qubits
+    for key, value in enumerate(bias_0):
+        # Hamming weight `key` maps to magnetization (n - 2 * key) / n, matching the convention of the removed per-bit loop.
+        m = (mid - key) / mid
         magnetization_0 += value * m
         sqr_magnetization_0 += value * m * m
-        bias_0[h] += value
 
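A worked check of the weight-to-magnetization map used in the loop above, at n_qubits = 4: weights 0, 2, and 4 give m = +1, 0, and -1, the same range the removed per-bit loop produced:

n_qubits = 4
mid = n_qubits / 2
assert [(mid - w) / mid for w in (0, 2, 4)] == [1.0, 0.0, -1.0]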
     c_magnetization, c_sqr_magnetization = 0, 0
     for p in range(1 << n_qubits):
@@ -437,11 +401,8 @@ def main():
         n_rows,
         n_cols,
         control_probs,
-        experiment_probs[d],
         bias,
-        model,
-        shots,
-        d,
+        d,
     )
 
     # Add up the square residuals:
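
Each depth's result then feeds the same accumulator as the t=0 case; the aggregation beyond the running sum is a hypothetical extension, not shown in this diff:

r_squared += result["l2_difference"] ** 2
rms_l2 = (r_squared / (depth + 1)) ** 0.5  # hypothetical RMS summary over the depth series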