diff --git a/AWAControl/algorithms/curv.py b/AWAControl/algorithms/curv.py
index d0f856cb795bf149525ee08938bcd14e38e421f9..74a77bb4ac8328ac5b5a17d7487a3a763c81e594 100644
--- a/AWAControl/algorithms/curv.py
+++ b/AWAControl/algorithms/curv.py
@@ -109,4 +109,7 @@ class CurvatureGridOptimize(GridOptimize):
         # ignore locations where the first derivative is less than zero
         result = torch.where(first_order_diff > 0, second_order_diff, 0.0)
 
+        # ignore locations above 0.2 nC
+        result = torch.where(objective_values[:,1:-1] < 0.2, result, 0.0)
+
         return result
diff --git a/AWAControl/configs/awa_camera_config.yml b/AWAControl/configs/awa_camera_config.yml
index e9d1f39cba4e49c6385320ebb833448a6f8bb69c..414b787a43b6ebc1e3e1fb31975460a6beb9dd4c 100644
--- a/AWAControl/configs/awa_camera_config.yml
+++ b/AWAControl/configs/awa_camera_config.yml
@@ -21,20 +21,23 @@ DYG3:
     - 251.55576493685035
     radius: 202.62023172988742
     video_number: 3
+    
 DYG4:
     type: frame_grabber
     center:
-    - 350
+    - 345
     - 277
     radius: 230
     video_number: 4
+    
 DYG5:
     type: frame_grabber
     center:
-    - 293.6070723016849
-    - 248.12799438794332
-    radius: 206.9578032921999
+    - 375
+    - 269
+    radius: 220
     video_number: 5
+    
 DYG7:
     type: blackfly
     center:
@@ -43,6 +46,7 @@ DYG7:
     radius: 350
     ip_address: 192.168.2.106
     gain: 1.0
+    
 DYG14:
     type: blackfly
     center:
@@ -51,6 +55,7 @@ DYG14:
     radius: 350
     ip_address: 192.168.2.111
     gain: 1.0
+    
 DYG15:
     type: blackfly
     center:
@@ -59,13 +64,16 @@ DYG15:
     radius: 350
     ip_address: 192.168.2.111  
     gain: 1.0
+    
 EYG1:
-    type: frame_grabber
+    type: blackfly
     center:
-    - 365.0686486037925
-    - 231.07420853906464
-    radius: 236.83693372959985
-    video_number: 32
+    - 960
+    - 540
+    radius: 380.0
+    ip_address: 192.168.2.43
+    gain: 10
+    
 EYG2:
     type: frame_grabber
     center:
@@ -73,6 +81,7 @@ EYG2:
     - 288.8650297445553
     radius: 243.35671576658976
     video_number: 22
+    
 YAGPerm:
     type: frame_grabber
     center:
@@ -80,6 +89,7 @@ YAGPerm:
     - 235.78765345303117
     radius: 240.27518749518512
     video_number: 30
+    
 EYG3:
     type: frame_grabber
     center:
@@ -87,6 +97,7 @@ EYG3:
     - 228.31807007324025
     radius: 154.73401858047842
     video_number: 28
+    
 EYG5:
     type: frame_grabber
     center:
@@ -94,20 +105,32 @@ EYG5:
     - 224.53486976686574
     radius: 194.8
     video_number: 36
+    
+EYG_BEFORE_TDC:
+    type: frame_grabber
+    center:
+    - 302.59830583753643
+    - 224.53486976686574
+    radius: 194.8
+    video_number: 35
+    
 DYG6:
     type: frame_grabber
     center:
-    - 263.9821715894023
-    - 230.3264518612156
-    radius: 197.20866357719058
-    video_number: 36
+    - 396.25
+    - 271.346
+    radius: 211
+    video_number: 6
+    
 SLIT_YAG:
-    type: frame_grabber
+    type: blackfly
     center:
-    - 232.21534477812514
-    - 229.76375219200324
-    radius: 150
-    video_number: 29
+    - 925
+    - 380
+    radius: 350
+    ip_address: 192.168.2.17
+    gain: 5
+    
 WYG8:
     type: blackfly
     center:
@@ -116,19 +139,71 @@ WYG8:
     radius: 350
     ip_address: 192.168.2.111  
     gain: 1.0
+    
 EYG7:
     type: blackfly
     center:
-    - 1316.661483278318
-    - 609.2225709973745
+    - 1040
+    - 615
     radius: 650.8327079387865
-    ip_address: 192.168.2.46
-    gain: 10
+    ip_address: 192.168.2.42
+    gain: 25
+    
 EYG8:
     type: blackfly
     center:
     - 966.7950192322188
     - 574.5583167280876
     radius: 564.0886521410309
-    ip_address: 192.168.2.36
+    ip_address: 192.168.2.32
     gain: 1
+
+DYG8:
+    type: frame_grabber
+    center:
+    - 337
+    - 216
+    radius: 220
+    video_number: 8
+
+DYG10:
+    type: frame_grabber
+    center:
+    - 337
+    - 216
+    radius: 220
+    video_number: 10
+
+DYG9:
+    type: frame_grabber
+    center:
+    - 337
+    - 216
+    radius: 220
+    video_number: 9
+
+AfterPETSDipole:
+    type: frame_grabber
+    center:
+    - 337
+    - 216
+    radius: 220
+    video_number: 23
+
+Yag12:
+    type: blackfly
+    center:
+    - 939
+    - 644
+    radius: 530
+    ip_address: 192.168.2.3  
+    gain: 1.0
+
+Yag11:
+    type: blackfly
+    center:
+    - 939
+    - 644
+    radius: 530
+    ip_address: 192.168.2.138
+    gain: 1.0
\ No newline at end of file
diff --git a/AWAControl/diagnostics/screen.py b/AWAControl/diagnostics/screen.py
index 8c09c6dcc9bb025ef4b7f06dad09275eabde7e60..e21dd80f10831479a69af29a7fe344b241b7059f 100644
--- a/AWAControl/diagnostics/screen.py
+++ b/AWAControl/diagnostics/screen.py
@@ -6,7 +6,7 @@ from copy import copy
 from pprint import pprint
 from time import sleep
 from typing import List, Optional, Union
-
+import warnings
 import h5py
 import numpy as np
 import pandas as pd
@@ -14,6 +14,7 @@ import yaml
 from epics import caget, caget_many, caput, PV
 from matplotlib import patches, pyplot as plt
 from pydantic import BaseModel, PositiveFloat, PositiveInt, field_validator
+from scipy.ndimage import median_filter
 
 from .. import CAMERA_CONFIG
 
@@ -161,8 +162,9 @@ class AWAImageDiagnostic(BaseModel):
     extra_pvs: List[str] = []
 
     background_file: str = None
-    save_image_location: Union[str, None] = None
+    save_image_location: Union[str] = "."
     roi: Union[RectangularROI, CircularROI] = None
+    filter_size: Optional[int] = None
 
     min_log_intensity: float = 4.0
     bounding_box_half_width: PositiveFloat = 2.0
@@ -233,7 +235,7 @@ class AWAImageDiagnostic(BaseModel):
                 result = {}
 
             results += [result | extra_data]
-            images += [img]
+            images += [raw_img]
 
             sleep(self.wait_time)
 
@@ -340,6 +342,14 @@ class AWAImageDiagnostic(BaseModel):
         if self.roi is not None:
             img = self.roi.crop_image(img.T).T
 
+        # filter image
+        if self.filter_size is not None:
+            img = median_filter(img, self.filter_size)
+            
+        # raise a warning if more than a small percentage is saturated
+        if np.sum(img == np.max(img)) / img.size > 0.0001:
+            warnings.warn("More than 0.01% of the image pixels are the same max value, image might be oversaturated -- fit values may be incorrect")
+
         return img, extra_data, raw_img
 
     def measure_background(self, n_measurements: int = 5, file_location: str = None):
@@ -353,16 +363,25 @@ class AWAImageDiagnostic(BaseModel):
 
         filename = os.path.join(file_location, f"{name}_background.npy")
 
-        print("please shutter beam")
-        input()
+        print(f"saving to {filename}")
+        print("shuttering beam")
+        shutter_pv = "AWA:DO0:Ch01"
+        old_state = caget(shutter_pv)
+
+        # put in shutter
+        #caput(shutter_pv,1)
+        input("please shutter beam")
+        sleep(2)
+        
         images = []
         for i in range(n_measurements):
             images += [self.get_raw_data()[0]]
             sleep(self.wait_time)
 
-        print("please un-shutter beam")
-        input()
-
+        # return shutter to old state
+        #caput(shutter_pv, old_state)
+        input("please unshutter beam")
+        
         # return average
         images = np.stack(images)
         mean = images.mean(axis=0)
@@ -504,6 +523,7 @@ class AWAImageDiagnostic(BaseModel):
             y_projection, visualize=self.visualize, n_restarts=self.n_fitting_restarts
         )
 
+
         return {
             "centroid": np.array((para_x[1], para_y[1])),
             "rms_sizes": np.array((para_x[2], para_y[2])),
@@ -637,6 +657,8 @@ class AWAFrameGrabberDiagnostic(AWAImageDiagnostic):
             # TODO: add runtime error if the img array is empty ->
             # means that the frame grabber is not actively taking measurements
             # (EPICS PV returns all zeros)
+            
             img = img.reshape(480, 640)
+            img = median_filter(img, size=2)
 
         return img, extra_data
diff --git a/AWAControl/diagnostics/utils/roi_finder.py b/AWAControl/diagnostics/utils/roi_finder.py
index d83bc3557bb74150cd327225729f69b52b31219d..15e900ca14037c3723272db0494d41ba26d4847b 100644
--- a/AWAControl/diagnostics/utils/roi_finder.py
+++ b/AWAControl/diagnostics/utils/roi_finder.py
@@ -1,5 +1,5 @@
 from AWAControl.diagnostics.screen import load_screen
-image_diagnostic = load_screen("EYG8")
+image_diagnostic = load_screen("DYG5")
 import matplotlib.pyplot as plt
 
 from circle_detection import ScreenFinder
diff --git a/AWAControl/processes/schottky.py b/AWAControl/processes/schottky.py
index 0f19ee306a08c33b791a67f48b61a893b1385c49..01a9598fb2dcbd90d4b202b8eff1da642a551da3 100644
--- a/AWAControl/processes/schottky.py
+++ b/AWAControl/processes/schottky.py
@@ -1,10 +1,12 @@
 import time
+import numpy as np
 
 import torch
 from xopt import VOCS, Xopt, Evaluator
 from xopt.generators.bayesian.bax_generator import BaxGenerator
 from AWAControl.algorithms.curv import CurvatureGridOptimize
-
+from epics import caget, caput
+from gpytorch.priors import GammaPrior
 from xopt.generators.bayesian.turbo import EntropyTurboController
 import matplotlib.pyplot as plt
 
@@ -14,6 +16,15 @@ warnings.filterwarnings("ignore")
 def run_schottky_scan(
     awa_env, n_steps=10
 ):
+    # LLRF feedback old state
+    pv = "AWALLRF:K1:EnableFC"
+    old_llrf_feedback_state = caget(pv)
+
+    # turn feedback off
+    print("turning llrf feedback off")
+    caput(pv, 0)
+    
+    
     def evaluate(inputs: dict):
         # caput values
         awa_env.set_variables(inputs)
@@ -22,7 +33,7 @@ def run_schottky_scan(
         time.sleep(1)
 
         results = awa_env.get_observables(['ICT1_nC'])
-
+        print(results)
         return results
 
     vocs = VOCS(
@@ -33,17 +44,19 @@ def run_schottky_scan(
     )
 
     algorithm = CurvatureGridOptimize(
-        n_mesh_points=200, minimize=False, observable_names_ordered=["ICT1_nC"]
+        n_mesh_points=200, minimize=False, 
+        observable_names_ordered=["ICT1_nC"], 
+        use_mean=True
     )
 
     # construct BAX generator
     generator = BaxGenerator(
         vocs=vocs, algorithm=algorithm,
-        n_interpolate_points=3, 
-        turbo_controller=EntropyTurboController(vocs)
+        n_interpolate_points=5, 
+        #turbo_controller=EntropyTurboController(vocs)
     )
-    generator.gp_constructor.use_low_noise_prior = False
-    generator.turbo_controller.restrict_model_data = False
+    generator.gp_constructor.custom_noise_prior = GammaPrior(1.202, 8.744)
+    #generator.turbo_controller.restrict_model_data = False
 
     X = Xopt(
         vocs=vocs,
@@ -59,7 +72,7 @@ def run_schottky_scan(
         print(e)
         
     
-    X.evaluate_data({'K1SetPhase': 0})
+    X.evaluate_data({'K1SetPhase': np.linspace(-20,360,20)})
 
     test_x = torch.linspace(*X.vocs.variables["K1SetPhase"], 200)
 
@@ -135,4 +148,8 @@ def run_schottky_scan(
         
     print("Setting K1Phase to "+str(mean+50))
     evaluate({'K1SetPhase': mean+50})
+
+    print("returning LLRF feedback from old state")
+    caput(pv, old_llrf_feedback_state)
+    
     return X